prompt: string (length 162 – 4.26M)
response: string (length 109 – 5.16M)
Generate the Verilog code corresponding to the following Chisel files.

File SwitchAllocator.scala:

package constellation.router

import chisel3._
import chisel3.util._

import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._

class SwitchAllocReq(val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams])
  (implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
  val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
  val tail = Bool()
}

class SwitchArbiter(inN: Int, outN: Int, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Vec(inN, Decoupled(new SwitchAllocReq(outParams, egressParams))))
    val out = Vec(outN, Decoupled(new SwitchAllocReq(outParams, egressParams)))
    val chosen_oh = Vec(outN, Output(UInt(inN.W)))
  })

  val lock = Seq.fill(outN) { RegInit(0.U(inN.W)) }
  val unassigned = Cat(io.in.map(_.valid).reverse) & ~(lock.reduce(_|_))
  val mask = RegInit(0.U(inN.W))
  val choices = Wire(Vec(outN, UInt(inN.W)))
  var sel = PriorityEncoderOH(Cat(unassigned, unassigned & ~mask))
  for (i <- 0 until outN) {
    choices(i) := sel | (sel >> inN)
    sel = PriorityEncoderOH(unassigned & ~choices(i))
  }

  io.in.foreach(_.ready := false.B)
  var chosens = 0.U(inN.W)
  val in_tails = Cat(io.in.map(_.bits.tail).reverse)
  for (i <- 0 until outN) {
    val in_valids = Cat((0 until inN).map { j => io.in(j).valid && !chosens(j) }.reverse)
    val chosen = Mux((in_valids & lock(i) & ~chosens).orR, lock(i), choices(i))
    io.chosen_oh(i) := chosen
    io.out(i).valid := (in_valids & chosen).orR
    io.out(i).bits := Mux1H(chosen, io.in.map(_.bits))
    for (j <- 0 until inN) {
      when (chosen(j) && io.out(i).ready) {
        io.in(j).ready := true.B
      }
    }
    chosens = chosens | chosen
    when (io.out(i).fire) {
      lock(i) := chosen & ~in_tails
    }
  }

  when (io.out(0).fire) {
    mask := (0 until inN).map { i => (io.chosen_oh(0) >> i) }.reduce(_|_)
  } .otherwise {
    mask := Mux(~mask === 0.U, 0.U, (mask << 1) | 1.U(1.W))
  }
}

class SwitchAllocator(
  val routerParams: RouterParams,
  val inParams: Seq[ChannelParams],
  val outParams: Seq[ChannelParams],
  val ingressParams: Seq[IngressChannelParams],
  val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module
    with HasRouterParams
    with HasRouterInputParams
    with HasRouterOutputParams {
  val io = IO(new Bundle {
    val req = MixedVec(allInParams.map(u =>
      Vec(u.destSpeedup, Flipped(Decoupled(new SwitchAllocReq(outParams, egressParams))))))
    val credit_alloc = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Output(new OutputCreditAlloc))})
    val switch_sel = MixedVec(allOutParams.map { o => Vec(o.srcSpeedup,
      MixedVec(allInParams.map { i => Vec(i.destSpeedup, Output(Bool())) })) })
  })

  val nInputChannels = allInParams.map(_.nVirtualChannels).sum

  val arbs = allOutParams.map { oP => Module(new SwitchArbiter(
    allInParams.map(_.destSpeedup).reduce(_+_),
    oP.srcSpeedup,
    outParams,
    egressParams
  ))}
  arbs.foreach(_.io.out.foreach(_.ready := true.B))

  var idx = 0
  io.req.foreach(_.foreach { o =>
    val fires = Wire(Vec(arbs.size, Bool()))
    arbs.zipWithIndex.foreach { case (a,i) =>
      a.io.in(idx).valid := o.valid && o.bits.vc_sel(i).reduce(_||_)
      a.io.in(idx).bits := o.bits
      fires(i) := a.io.in(idx).fire
    }
    o.ready := fires.reduce(_||_)
    idx += 1
  })

  for (i <- 0 until nAllOutputs) {
    for (j <- 0 until allOutParams(i).srcSpeedup) {
      idx = 0
      for (m <- 0 until nAllInputs) {
        for (n <- 0 until allInParams(m).destSpeedup) {
          io.switch_sel(i)(j)(m)(n) := arbs(i).io.in(idx).valid && arbs(i).io.chosen_oh(j)(idx) && arbs(i).io.out(j).valid
          idx += 1
        }
      }
    }
  }

  io.credit_alloc.foreach(_.foreach(_.alloc := false.B))
  io.credit_alloc.foreach(_.foreach(_.tail := false.B))
  (arbs zip io.credit_alloc).zipWithIndex.map { case ((a,i),t) =>
    for (j <- 0 until i.size) {
      for (k <- 0 until a.io.out.size) {
        when (a.io.out(k).valid && a.io.out(k).bits.vc_sel(t)(j)) {
          i(j).alloc := true.B
          i(j).tail := a.io.out(k).bits.tail
        }
      }
    }
  }
}
module SwitchAllocator_39( // @[SwitchAllocator.scala:64:7] input clock, // @[SwitchAllocator.scala:64:7] input reset, // @[SwitchAllocator.scala:64:7] output io_req_2_0_ready, // @[SwitchAllocator.scala:74:14] input io_req_2_0_valid, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_2_4, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_1_1, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_0_4, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_tail, // @[SwitchAllocator.scala:74:14] output io_req_1_0_ready, // @[SwitchAllocator.scala:74:14] input io_req_1_0_valid, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_2_4, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_1_1, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_0_4, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_tail, // @[SwitchAllocator.scala:74:14] output io_req_0_0_ready, // @[SwitchAllocator.scala:74:14] input io_req_0_0_valid, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_2_4, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_1_1, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_0_4, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_tail, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_2_4_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_1_1_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_0_4_alloc, // @[SwitchAllocator.scala:74:14] output io_switch_sel_2_0_2_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_2_0_1_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_2_0_0_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_1_0_2_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_1_0_1_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_1_0_0_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_0_0_2_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_0_0_1_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_0_0_0_0 // @[SwitchAllocator.scala:74:14] ); wire _arbs_2_io_in_0_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_in_1_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_in_2_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_out_0_bits_vc_sel_2_4; // @[SwitchAllocator.scala:83:45] wire [2:0] _arbs_2_io_chosen_oh_0; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_in_0_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_in_1_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_in_2_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_out_0_bits_vc_sel_1_1; // @[SwitchAllocator.scala:83:45] wire [2:0] _arbs_1_io_chosen_oh_0; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_in_0_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_in_1_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_in_2_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_out_0_bits_vc_sel_0_4; // @[SwitchAllocator.scala:83:45] wire [2:0] _arbs_0_io_chosen_oh_0; // @[SwitchAllocator.scala:83:45] wire arbs_0_io_in_0_valid = io_req_0_0_valid & io_req_0_0_bits_vc_sel_0_4; // @[SwitchAllocator.scala:95:37] wire arbs_1_io_in_0_valid = io_req_0_0_valid & io_req_0_0_bits_vc_sel_1_1; // @[SwitchAllocator.scala:95:37] wire arbs_2_io_in_0_valid = io_req_0_0_valid & io_req_0_0_bits_vc_sel_2_4; // 
@[SwitchAllocator.scala:95:37] wire arbs_0_io_in_1_valid = io_req_1_0_valid & io_req_1_0_bits_vc_sel_0_4; // @[SwitchAllocator.scala:95:37] wire arbs_1_io_in_1_valid = io_req_1_0_valid & io_req_1_0_bits_vc_sel_1_1; // @[SwitchAllocator.scala:95:37] wire arbs_2_io_in_1_valid = io_req_1_0_valid & io_req_1_0_bits_vc_sel_2_4; // @[SwitchAllocator.scala:95:37] wire arbs_0_io_in_2_valid = io_req_2_0_valid & io_req_2_0_bits_vc_sel_0_4; // @[SwitchAllocator.scala:95:37] wire arbs_1_io_in_2_valid = io_req_2_0_valid & io_req_2_0_bits_vc_sel_1_1; // @[SwitchAllocator.scala:95:37] wire arbs_2_io_in_2_valid = io_req_2_0_valid & io_req_2_0_bits_vc_sel_2_4; // @[SwitchAllocator.scala:95:37] SwitchArbiter_228 arbs_0 ( // @[SwitchAllocator.scala:83:45] .clock (clock), .reset (reset), .io_in_0_ready (_arbs_0_io_in_0_ready), .io_in_0_valid (arbs_0_io_in_0_valid), // @[SwitchAllocator.scala:95:37] .io_in_0_bits_vc_sel_2_4 (io_req_0_0_bits_vc_sel_2_4), .io_in_0_bits_vc_sel_1_1 (io_req_0_0_bits_vc_sel_1_1), .io_in_0_bits_vc_sel_0_4 (io_req_0_0_bits_vc_sel_0_4), .io_in_0_bits_tail (io_req_0_0_bits_tail), .io_in_1_ready (_arbs_0_io_in_1_ready), .io_in_1_valid (arbs_0_io_in_1_valid), // @[SwitchAllocator.scala:95:37] .io_in_1_bits_vc_sel_2_4 (io_req_1_0_bits_vc_sel_2_4), .io_in_1_bits_vc_sel_1_1 (io_req_1_0_bits_vc_sel_1_1), .io_in_1_bits_vc_sel_0_4 (io_req_1_0_bits_vc_sel_0_4), .io_in_1_bits_tail (io_req_1_0_bits_tail), .io_in_2_ready (_arbs_0_io_in_2_ready), .io_in_2_valid (arbs_0_io_in_2_valid), // @[SwitchAllocator.scala:95:37] .io_in_2_bits_vc_sel_2_4 (io_req_2_0_bits_vc_sel_2_4), .io_in_2_bits_vc_sel_1_1 (io_req_2_0_bits_vc_sel_1_1), .io_in_2_bits_vc_sel_0_4 (io_req_2_0_bits_vc_sel_0_4), .io_in_2_bits_tail (io_req_2_0_bits_tail), .io_out_0_valid (_arbs_0_io_out_0_valid), .io_out_0_bits_vc_sel_2_4 (/* unused */), .io_out_0_bits_vc_sel_1_1 (/* unused */), .io_out_0_bits_vc_sel_0_4 (_arbs_0_io_out_0_bits_vc_sel_0_4), .io_chosen_oh_0 (_arbs_0_io_chosen_oh_0) ); // @[SwitchAllocator.scala:83:45] SwitchArbiter_228 arbs_1 ( // @[SwitchAllocator.scala:83:45] .clock (clock), .reset (reset), .io_in_0_ready (_arbs_1_io_in_0_ready), .io_in_0_valid (arbs_1_io_in_0_valid), // @[SwitchAllocator.scala:95:37] .io_in_0_bits_vc_sel_2_4 (io_req_0_0_bits_vc_sel_2_4), .io_in_0_bits_vc_sel_1_1 (io_req_0_0_bits_vc_sel_1_1), .io_in_0_bits_vc_sel_0_4 (io_req_0_0_bits_vc_sel_0_4), .io_in_0_bits_tail (io_req_0_0_bits_tail), .io_in_1_ready (_arbs_1_io_in_1_ready), .io_in_1_valid (arbs_1_io_in_1_valid), // @[SwitchAllocator.scala:95:37] .io_in_1_bits_vc_sel_2_4 (io_req_1_0_bits_vc_sel_2_4), .io_in_1_bits_vc_sel_1_1 (io_req_1_0_bits_vc_sel_1_1), .io_in_1_bits_vc_sel_0_4 (io_req_1_0_bits_vc_sel_0_4), .io_in_1_bits_tail (io_req_1_0_bits_tail), .io_in_2_ready (_arbs_1_io_in_2_ready), .io_in_2_valid (arbs_1_io_in_2_valid), // @[SwitchAllocator.scala:95:37] .io_in_2_bits_vc_sel_2_4 (io_req_2_0_bits_vc_sel_2_4), .io_in_2_bits_vc_sel_1_1 (io_req_2_0_bits_vc_sel_1_1), .io_in_2_bits_vc_sel_0_4 (io_req_2_0_bits_vc_sel_0_4), .io_in_2_bits_tail (io_req_2_0_bits_tail), .io_out_0_valid (_arbs_1_io_out_0_valid), .io_out_0_bits_vc_sel_2_4 (/* unused */), .io_out_0_bits_vc_sel_1_1 (_arbs_1_io_out_0_bits_vc_sel_1_1), .io_out_0_bits_vc_sel_0_4 (/* unused */), .io_chosen_oh_0 (_arbs_1_io_chosen_oh_0) ); // @[SwitchAllocator.scala:83:45] SwitchArbiter_228 arbs_2 ( // @[SwitchAllocator.scala:83:45] .clock (clock), .reset (reset), .io_in_0_ready (_arbs_2_io_in_0_ready), .io_in_0_valid (arbs_2_io_in_0_valid), // @[SwitchAllocator.scala:95:37] 
.io_in_0_bits_vc_sel_2_4 (io_req_0_0_bits_vc_sel_2_4), .io_in_0_bits_vc_sel_1_1 (io_req_0_0_bits_vc_sel_1_1), .io_in_0_bits_vc_sel_0_4 (io_req_0_0_bits_vc_sel_0_4), .io_in_0_bits_tail (io_req_0_0_bits_tail), .io_in_1_ready (_arbs_2_io_in_1_ready), .io_in_1_valid (arbs_2_io_in_1_valid), // @[SwitchAllocator.scala:95:37] .io_in_1_bits_vc_sel_2_4 (io_req_1_0_bits_vc_sel_2_4), .io_in_1_bits_vc_sel_1_1 (io_req_1_0_bits_vc_sel_1_1), .io_in_1_bits_vc_sel_0_4 (io_req_1_0_bits_vc_sel_0_4), .io_in_1_bits_tail (io_req_1_0_bits_tail), .io_in_2_ready (_arbs_2_io_in_2_ready), .io_in_2_valid (arbs_2_io_in_2_valid), // @[SwitchAllocator.scala:95:37] .io_in_2_bits_vc_sel_2_4 (io_req_2_0_bits_vc_sel_2_4), .io_in_2_bits_vc_sel_1_1 (io_req_2_0_bits_vc_sel_1_1), .io_in_2_bits_vc_sel_0_4 (io_req_2_0_bits_vc_sel_0_4), .io_in_2_bits_tail (io_req_2_0_bits_tail), .io_out_0_valid (_arbs_2_io_out_0_valid), .io_out_0_bits_vc_sel_2_4 (_arbs_2_io_out_0_bits_vc_sel_2_4), .io_out_0_bits_vc_sel_1_1 (/* unused */), .io_out_0_bits_vc_sel_0_4 (/* unused */), .io_chosen_oh_0 (_arbs_2_io_chosen_oh_0) ); // @[SwitchAllocator.scala:83:45] assign io_req_2_0_ready = _arbs_0_io_in_2_ready & arbs_0_io_in_2_valid | _arbs_1_io_in_2_ready & arbs_1_io_in_2_valid | _arbs_2_io_in_2_ready & arbs_2_io_in_2_valid; // @[Decoupled.scala:51:35] assign io_req_1_0_ready = _arbs_0_io_in_1_ready & arbs_0_io_in_1_valid | _arbs_1_io_in_1_ready & arbs_1_io_in_1_valid | _arbs_2_io_in_1_ready & arbs_2_io_in_1_valid; // @[Decoupled.scala:51:35] assign io_req_0_0_ready = _arbs_0_io_in_0_ready & arbs_0_io_in_0_valid | _arbs_1_io_in_0_ready & arbs_1_io_in_0_valid | _arbs_2_io_in_0_ready & arbs_2_io_in_0_valid; // @[Decoupled.scala:51:35] assign io_credit_alloc_2_4_alloc = _arbs_2_io_out_0_valid & _arbs_2_io_out_0_bits_vc_sel_2_4; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_1_1_alloc = _arbs_1_io_out_0_valid & _arbs_1_io_out_0_bits_vc_sel_1_1; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_0_4_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_4; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_switch_sel_2_0_2_0 = arbs_2_io_in_2_valid & _arbs_2_io_chosen_oh_0[2] & _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_2_0_1_0 = arbs_2_io_in_1_valid & _arbs_2_io_chosen_oh_0[1] & _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_2_0_0_0 = arbs_2_io_in_0_valid & _arbs_2_io_chosen_oh_0[0] & _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_1_0_2_0 = arbs_1_io_in_2_valid & _arbs_1_io_chosen_oh_0[2] & _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_1_0_1_0 = arbs_1_io_in_1_valid & _arbs_1_io_chosen_oh_0[1] & _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_1_0_0_0 = arbs_1_io_in_0_valid & _arbs_1_io_chosen_oh_0[0] & _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_0_0_2_0 = arbs_0_io_in_2_valid & _arbs_0_io_chosen_oh_0[2] & _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_0_0_1_0 = arbs_0_io_in_1_valid & _arbs_0_io_chosen_oh_0[1] & _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_0_0_0_0 = 
arbs_0_io_in_0_valid & _arbs_0_io_chosen_oh_0[0] & _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] endmodule
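Note: the SwitchArbiter in the prompt above combines a per-output lock register (a packet keeps its grant until its tail flit passes, via lock(i) := chosen & ~in_tails) with a rotating priority mask. As a rough illustration of just the locking part, here is a minimal, self-contained Chisel sketch; the single-output structure, the TailFlit bundle, and all names are illustrative and simplified, not taken from the constellation sources, and the rotating mask is omitted.

import chisel3._
import chisel3.util._

// Payload carrying only a tail marker, enough to show packet-granular locking.
class TailFlit extends Bundle {
  val tail = Bool()
}

// Single-output arbiter that holds its grant on one input until that input
// sends a flit with `tail` set, then releases it.
class LockingArbiterSketch(inN: Int) extends Module {
  val io = IO(new Bundle {
    val in        = Flipped(Vec(inN, Decoupled(new TailFlit)))
    val out       = Decoupled(new TailFlit)
    val chosen_oh = Output(UInt(inN.W))
  })

  val lock   = RegInit(0.U(inN.W))                 // one-hot grant held across a packet
  val valids = Cat(io.in.map(_.valid).reverse)
  // Keep the locked input while it is still requesting, otherwise pick a new one.
  val chosen = Mux((valids & lock).orR, lock, PriorityEncoderOH(valids))

  io.chosen_oh := chosen
  io.out.valid := (valids & chosen).orR
  io.out.bits  := Mux1H(chosen, io.in.map(_.bits))
  for (i <- 0 until inN) {
    io.in(i).ready := chosen(i) && io.out.ready
  }
  when (io.out.fire) {
    // Hold the grant until the tail flit goes through, then clear the lock.
    lock := Mux(io.out.bits.tail, 0.U, chosen)
  }
}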
Generate the Verilog code corresponding to the following Chisel files. File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
module RoundAnyRawFNToRecFN_ie2_is1_oe8_os24_44(); // @[RoundAnyRawFNToRecFN.scala:48:5] wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19] wire [26:0] adjustedSig = 27'h2000000; // @[RoundAnyRawFNToRecFN.scala:114:22] wire [22:0] _common_fractOut_T = 23'h400000; // @[RoundAnyRawFNToRecFN.scala:139:28] wire [8:0] _expOut_T_2 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:253:14, :257:14, :261:14, :265:14] wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:253:14, :257:14, :261:14, :265:14] wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:253:14, :257:14, :261:14, :265:14] wire [8:0] _expOut_T_12 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:253:14, :257:14, :261:14, :265:14] wire [8:0] _expOut_T_1 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16] wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16] wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16] wire [8:0] _expOut_T_11 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16] wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16] wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16] wire [8:0] _expOut_T_18 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16] wire [8:0] _expOut_T_20 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16] wire [8:0] _sAdjustedExp_T_1 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73] wire [8:0] common_expOut = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73] wire [8:0] _common_expOut_T = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73] wire [8:0] _common_expOut_T_2 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73] wire [8:0] _expOut_T_3 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73] wire [8:0] _expOut_T_7 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73] wire [8:0] _expOut_T_10 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73] wire [8:0] _expOut_T_13 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73] wire [8:0] _expOut_T_15 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73] wire [8:0] _expOut_T_17 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73] wire [8:0] _expOut_T_19 = 9'h100; // 
@[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73] wire [8:0] expOut = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73] wire [22:0] common_fractOut = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13] wire [22:0] _common_fractOut_T_1 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13] wire [22:0] _common_fractOut_T_2 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13] wire [22:0] _fractOut_T_2 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13] wire [22:0] _fractOut_T_3 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13] wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13] wire [22:0] fractOut = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13] wire [9:0] _sAdjustedExp_T = 10'h100; // @[RoundAnyRawFNToRecFN.scala:104:25, :136:55, :286:23] wire [9:0] sAdjustedExp = 10'h100; // @[RoundAnyRawFNToRecFN.scala:106:31, :136:55, :286:23] wire [9:0] _common_expOut_T_1 = 10'h100; // @[RoundAnyRawFNToRecFN.scala:136:55, :286:23] wire [9:0] _io_out_T = 10'h100; // @[RoundAnyRawFNToRecFN.scala:136:55, :286:23] wire [1:0] _io_exceptionFlags_T = 2'h0; // @[RoundAnyRawFNToRecFN.scala:288:23] wire [3:0] _io_exceptionFlags_T_2 = 4'h0; // @[RoundAnyRawFNToRecFN.scala:288:53] wire [4:0] io_exceptionFlags = 5'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:66] wire [4:0] _io_exceptionFlags_T_3 = 5'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:66] wire [32:0] io_out = 33'h80000000; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :286:33] wire [32:0] _io_out_T_1 = 33'h80000000; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :286:33] wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}] wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}] wire _roundMagUp_T_1 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}] wire _commonCase_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}] wire _commonCase_T_1 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}] wire _commonCase_T_2 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}] wire _commonCase_T_3 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}] wire commonCase = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}] wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}] wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}] wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:41] wire [2:0] _io_exceptionFlags_T_1 = 3'h0; // 
@[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:41] wire [1:0] io_in_sig = 2'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16] wire [3:0] io_in_sExp = 4'h4; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16] wire io_invalidExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isNaN = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isInf = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isZero = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_sign = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire roundingMode_minMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:91:53] wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53] wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53] wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53] wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53] wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27] wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63] wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42] wire common_overflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:124:37] wire common_totalUnderflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:125:37] wire common_underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:126:37] wire common_inexact = 1'h0; // @[RoundAnyRawFNToRecFN.scala:127:37] wire isNaNOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:235:34] wire notNaN_isSpecialInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:236:49] wire overflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:238:32] wire underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:239:32] wire _inexact_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:240:43] wire inexact = 1'h0; // @[RoundAnyRawFNToRecFN.scala:240:28] wire _pegMinNonzeroMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:20] wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60] wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45] wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42] wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39] wire _notNaN_isInfOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:248:45] wire notNaN_isInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:248:32] wire signOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:250:22] wire _expOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:253:32] wire _fractOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:280:22] wire _fractOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:280:38] endmodule
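Note: the exponent thresholds in RoundAnyRawFNToRecFN are compile-time functions of outExpWidth and outSigWidth, which is why the oe8_os24 instance above elaborates to nothing but constant wires. The short plain-Scala check below re-derives them for outExpWidth = 8, outSigWidth = 24 (the source uses BigInt; plain Int is enough here, and the object name is illustrative); the complement of outMinNonzeroExp matches the 9'h194 literal in the Verilog above.

object RecFNConstsCheck extends App {
  val outExpWidth = 8
  val outSigWidth = 24
  val outNaNExp        = 7 << (outExpWidth - 2)            // 0x1C0
  val outInfExp        = 6 << (outExpWidth - 2)            // 0x180
  val outMaxFiniteExp  = outInfExp - 1                     // 0x17F
  val outMinNormExp    = (1 << (outExpWidth - 1)) + 2      // 0x082
  val outMinNonzeroExp = outMinNormExp - outSigWidth + 1   // 0x06B
  // (outExpWidth+1)-bit complement used when pegging the minimum nonzero
  // magnitude: 0x194, i.e. the 9'h194 constant wire seen above.
  val pegMinNonzeroMask = ~outMinNonzeroExp & ((1 << (outExpWidth + 1)) - 1)
  println(f"NaN=0x$outNaNExp%X Inf=0x$outInfExp%X maxFinite=0x$outMaxFiniteExp%X")
  println(f"minNorm=0x$outMinNormExp%X minNonzero=0x$outMinNonzeroExp%X pegMask=0x$pegMinNonzeroMask%X")
}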
Generate the Verilog code corresponding to the following Chisel files. File Buffer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.BufferParams class TLBufferNode ( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit valName: ValName) extends TLAdapterNode( clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) }, managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) } ) { override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}" override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none) } class TLBuffer( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters) extends LazyModule { def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace) def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde) def this()(implicit p: Parameters) = this(BufferParams.default) val node = new TLBufferNode(a, b, c, d, e) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def headBundle = node.out.head._2.bundle override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.a <> a(in .a) in .d <> d(out.d) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> b(out.b) out.c <> c(in .c) out.e <> e(in .e) } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLBuffer { def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default) def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde) def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace) def apply( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters): TLNode = { val buffer = LazyModule(new TLBuffer(a, b, c, d, e)) buffer.node } def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = { val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) } name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } } buffers.map(_.node) } def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = { chain(depth, name) .reduceLeftOption(_ :*=* _) .getOrElse(TLNameNode("no_buffer")) } } File Nodes.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. 
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } }
module TLBuffer_a14d64s7k1z4u( // @[Buffer.scala:40:9] input clock, // @[Buffer.scala:40:9] input reset, // @[Buffer.scala:40:9] output auto_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [13:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [13:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25] ); wire _nodeIn_d_q_io_deq_valid; // @[Decoupled.scala:362:21] wire [2:0] _nodeIn_d_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21] wire [1:0] _nodeIn_d_q_io_deq_bits_param; // @[Decoupled.scala:362:21] wire [3:0] _nodeIn_d_q_io_deq_bits_size; // @[Decoupled.scala:362:21] wire [6:0] _nodeIn_d_q_io_deq_bits_source; // @[Decoupled.scala:362:21] wire _nodeIn_d_q_io_deq_bits_sink; // @[Decoupled.scala:362:21] wire _nodeIn_d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21] wire _nodeIn_d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21] wire _nodeOut_a_q_io_enq_ready; // @[Decoupled.scala:362:21] TLMonitor_21 monitor ( // @[Nodes.scala:27:25] .clock (clock), .reset (reset), .io_in_a_ready (_nodeOut_a_q_io_enq_ready), // @[Decoupled.scala:362:21] .io_in_a_valid (auto_in_a_valid), .io_in_a_bits_opcode (auto_in_a_bits_opcode), .io_in_a_bits_param (auto_in_a_bits_param), .io_in_a_bits_size (auto_in_a_bits_size), .io_in_a_bits_source (auto_in_a_bits_source), .io_in_a_bits_address (auto_in_a_bits_address), .io_in_a_bits_mask (auto_in_a_bits_mask), .io_in_a_bits_corrupt 
(auto_in_a_bits_corrupt), .io_in_d_ready (auto_in_d_ready), .io_in_d_valid (_nodeIn_d_q_io_deq_valid), // @[Decoupled.scala:362:21] .io_in_d_bits_opcode (_nodeIn_d_q_io_deq_bits_opcode), // @[Decoupled.scala:362:21] .io_in_d_bits_param (_nodeIn_d_q_io_deq_bits_param), // @[Decoupled.scala:362:21] .io_in_d_bits_size (_nodeIn_d_q_io_deq_bits_size), // @[Decoupled.scala:362:21] .io_in_d_bits_source (_nodeIn_d_q_io_deq_bits_source), // @[Decoupled.scala:362:21] .io_in_d_bits_sink (_nodeIn_d_q_io_deq_bits_sink), // @[Decoupled.scala:362:21] .io_in_d_bits_denied (_nodeIn_d_q_io_deq_bits_denied), // @[Decoupled.scala:362:21] .io_in_d_bits_corrupt (_nodeIn_d_q_io_deq_bits_corrupt) // @[Decoupled.scala:362:21] ); // @[Nodes.scala:27:25] Queue2_TLBundleA_a14d64s7k1z4u nodeOut_a_q ( // @[Decoupled.scala:362:21] .clock (clock), .reset (reset), .io_enq_ready (_nodeOut_a_q_io_enq_ready), .io_enq_valid (auto_in_a_valid), .io_enq_bits_opcode (auto_in_a_bits_opcode), .io_enq_bits_param (auto_in_a_bits_param), .io_enq_bits_size (auto_in_a_bits_size), .io_enq_bits_source (auto_in_a_bits_source), .io_enq_bits_address (auto_in_a_bits_address), .io_enq_bits_mask (auto_in_a_bits_mask), .io_enq_bits_data (auto_in_a_bits_data), .io_enq_bits_corrupt (auto_in_a_bits_corrupt), .io_deq_ready (auto_out_a_ready), .io_deq_valid (auto_out_a_valid), .io_deq_bits_opcode (auto_out_a_bits_opcode), .io_deq_bits_param (auto_out_a_bits_param), .io_deq_bits_size (auto_out_a_bits_size), .io_deq_bits_source (auto_out_a_bits_source), .io_deq_bits_address (auto_out_a_bits_address), .io_deq_bits_mask (auto_out_a_bits_mask), .io_deq_bits_data (auto_out_a_bits_data), .io_deq_bits_corrupt (auto_out_a_bits_corrupt) ); // @[Decoupled.scala:362:21] Queue2_TLBundleD_a14d64s7k1z4u nodeIn_d_q ( // @[Decoupled.scala:362:21] .clock (clock), .reset (reset), .io_enq_ready (auto_out_d_ready), .io_enq_valid (auto_out_d_valid), .io_enq_bits_opcode (auto_out_d_bits_opcode), .io_enq_bits_size (auto_out_d_bits_size), .io_enq_bits_source (auto_out_d_bits_source), .io_enq_bits_corrupt (auto_out_d_bits_corrupt), .io_deq_ready (auto_in_d_ready), .io_deq_valid (_nodeIn_d_q_io_deq_valid), .io_deq_bits_opcode (_nodeIn_d_q_io_deq_bits_opcode), .io_deq_bits_param (_nodeIn_d_q_io_deq_bits_param), .io_deq_bits_size (_nodeIn_d_q_io_deq_bits_size), .io_deq_bits_source (_nodeIn_d_q_io_deq_bits_source), .io_deq_bits_sink (_nodeIn_d_q_io_deq_bits_sink), .io_deq_bits_denied (_nodeIn_d_q_io_deq_bits_denied), .io_deq_bits_data (auto_in_d_bits_data), .io_deq_bits_corrupt (_nodeIn_d_q_io_deq_bits_corrupt) ); // @[Decoupled.scala:362:21] assign auto_in_a_ready = _nodeOut_a_q_io_enq_ready; // @[Decoupled.scala:362:21] assign auto_in_d_valid = _nodeIn_d_q_io_deq_valid; // @[Decoupled.scala:362:21] assign auto_in_d_bits_opcode = _nodeIn_d_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21] assign auto_in_d_bits_param = _nodeIn_d_q_io_deq_bits_param; // @[Decoupled.scala:362:21] assign auto_in_d_bits_size = _nodeIn_d_q_io_deq_bits_size; // @[Decoupled.scala:362:21] assign auto_in_d_bits_source = _nodeIn_d_q_io_deq_bits_source; // @[Decoupled.scala:362:21] assign auto_in_d_bits_sink = _nodeIn_d_q_io_deq_bits_sink; // @[Decoupled.scala:362:21] assign auto_in_d_bits_denied = _nodeIn_d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21] assign auto_in_d_bits_corrupt = _nodeIn_d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21] endmodule
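For reference, the Queue2_TLBundleA/Queue2_TLBundleD instances above are elaborated from the standard chisel3.util.Queue factory (hence the Decoupled.scala source locators) applied to the A and D channels. A minimal sketch of that buffering pattern, using an illustrative generic payload rather than the actual TileLink bundles:

import chisel3._
import chisel3.util._

// Sketch of the two-entry Decoupled buffering pattern seen in the Verilog above.
class TwoEntryBuffer[T <: Data](gen: T) extends Module {
  val io = IO(new Bundle {
    val enq = Flipped(Decoupled(gen))
    val deq = Decoupled(gen)
  })
  // Queue(enq, 2) builds a two-entry queue and returns its dequeue interface,
  // mirroring how TLBuffer places a small queue on each TileLink channel.
  io.deq <> Queue(io.enq, 2)
}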
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
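// Each opcode case below checks the request against the diplomatically negotiated
// master/slave capabilities, plus source-ID, alignment, param, mask, and corrupt rules.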
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
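// Note: a_set and d_clr may assert the same source bit in one cycle (the same_cycle_resp
// case above); the clear wins, so a request answered in the same cycle does not linger
// in the inflight bitmap.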
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
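Editorial aside (not one of the files above): the beat tracking in firstlastHelper, load beats1 on the first accepted beat and then count down to zero, is the pattern behind the a_first/d_first counters that appear in the monitor below. A minimal standalone sketch of just that counter, using the hypothetical module name ExampleBeatCounter, assuming maxBeats >= 1:

import chisel3._
import chisel3.util._

class ExampleBeatCounter(maxBeats: Int) extends Module {
  val io = IO(new Bundle {
    val fire   = Input(Bool())                   // a beat is accepted this cycle
    val beats1 = Input(UInt(log2Up(maxBeats).W)) // total beats in the message, minus one
    val first  = Output(Bool())
    val last   = Output(Bool())
    val done   = Output(Bool())
  })
  // counter == 0 means the next accepted beat starts a new message
  val counter  = RegInit(0.U(log2Up(maxBeats).W))
  val counter1 = counter - 1.U
  io.first := counter === 0.U
  io.last  := counter === 1.U || io.beats1 === 0.U
  io.done  := io.last && io.fire
  when (io.fire) {
    // load beats1 on the first beat of a message, then count down to zero
    counter := Mux(io.first, io.beats1, counter1)
  }
}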
module TLMonitor_31( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [2:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [3:0] source; // @[Monitor.scala:390:22] reg [27:0] address; // @[Monitor.scala:391:22] reg [2:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [3:0] source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [9:0] inflight; // @[Monitor.scala:614:27] reg [39:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [39:0] inflight_sizes; // @[Monitor.scala:618:33] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire [15:0] _GEN_0 = {12'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [15:0] _GEN_3 = {12'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [9:0] inflight_1; // @[Monitor.scala:726:35] reg [39:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
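Editorial aside (not part of the generated monitor): the inflight, inflight_opcodes, and inflight_sizes registers above follow a common pattern, track outstanding requests per source ID by setting a bit when an A-channel request fires and clearing it when the matching D-channel response completes. A minimal sketch of that pattern, with the hypothetical name ExampleInflight and simplified single-beat semantics:

import chisel3._
import chisel3.util._

class ExampleInflight(nSources: Int) extends Module {
  val io = IO(new Bundle {
    val a_fire   = Input(Bool())                  // request accepted on channel A
    val a_source = Input(UInt(log2Up(nSources).W))
    val d_fire   = Input(Bool())                  // response completed on channel D
    val d_source = Input(UInt(log2Up(nSources).W))
    val busy     = Output(UInt(nSources.W))       // one bit per outstanding source ID
  })
  val inflight = RegInit(0.U(nSources.W))
  val set   = Mux(io.a_fire, UIntToOH(io.a_source, nSources), 0.U)
  val clear = Mux(io.d_fire, UIntToOH(io.d_source, nSources), 0.U)
  inflight := (inflight | set) & ~clear
  io.busy := inflight
  // Sanity check in the spirit of the monitor above: a D response should name a
  // source that is outstanding (or being set this same cycle).
  assert(!io.d_fire || (inflight | set)(io.d_source))
}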
Generate the Verilog code corresponding to the following Chisel files. File MulRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (ported from Verilog to Chisel by Andrew Waterman). Copyright 2019, 2020 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulFullRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val a = Input(new RawFloat(expWidth, sigWidth)) val b = Input(new RawFloat(expWidth, sigWidth)) val invalidExc = Output(Bool()) val rawOut = Output(new RawFloat(expWidth, sigWidth*2 - 1)) }) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ val notSigNaN_invalidExc = (io.a.isInf && io.b.isZero) || (io.a.isZero && io.b.isInf) val notNaN_isInfOut = io.a.isInf || io.b.isInf val notNaN_isZeroOut = io.a.isZero || io.b.isZero val notNaN_signOut = io.a.sign ^ io.b.sign val common_sExpOut = io.a.sExp + io.b.sExp - (1<<expWidth).S val common_sigOut = (io.a.sig * io.b.sig)(sigWidth*2 - 1, 0) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ io.invalidExc := isSigNaNRawFloat(io.a) || isSigNaNRawFloat(io.b) || notSigNaN_invalidExc io.rawOut.isInf := notNaN_isInfOut io.rawOut.isZero := notNaN_isZeroOut io.rawOut.sExp := common_sExpOut io.rawOut.isNaN := io.a.isNaN || io.b.isNaN io.rawOut.sign := notNaN_signOut io.rawOut.sig := common_sigOut } class MulRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val a = Input(new RawFloat(expWidth, sigWidth)) val b = Input(new RawFloat(expWidth, sigWidth)) val invalidExc = Output(Bool()) val rawOut = Output(new RawFloat(expWidth, sigWidth + 2)) }) val mulFullRaw = Module(new MulFullRawFN(expWidth, sigWidth)) mulFullRaw.io.a := io.a mulFullRaw.io.b := io.b io.invalidExc := mulFullRaw.io.invalidExc io.rawOut := mulFullRaw.io.rawOut io.rawOut.sig := { val sig = mulFullRaw.io.rawOut.sig Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR) } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulRecFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val a = Input(UInt((expWidth + sigWidth + 1).W)) val b = Input(UInt((expWidth + sigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(Bool()) val out = Output(UInt((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(UInt(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val mulRawFN = Module(new MulRawFN(expWidth, sigWidth)) mulRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a) mulRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundRawFNToRecFN = Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0)) roundRawFNToRecFN.io.invalidExc := mulRawFN.io.invalidExc roundRawFNToRecFN.io.infiniteExc := false.B roundRawFNToRecFN.io.in := mulRawFN.io.rawOut roundRawFNToRecFN.io.roundingMode := io.roundingMode roundRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundRawFNToRecFN.io.out io.exceptionFlags := 
roundRawFNToRecFN.io.exceptionFlags }
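Editorial aside (not part of MulRecFN.scala): the rawOut.sig override in MulRawFN keeps the top sigWidth + 2 bits of the full 2*sigWidth-bit product significand and OR-reduces the remaining low bits into a single sticky bit, so the rounder can still tell whether anything nonzero was discarded. A minimal sketch of the same computation on a plain UInt, with the hypothetical name StickyCompressSketch:

import chisel3._
import chisel3.util._

object StickyCompressSketch {
  // fullSig is the 2*sigWidth-bit product significand from MulFullRawFN;
  // the result is sigWidth + 3 bits wide, matching RawFloat(_, sigWidth + 2).sig.
  def apply(fullSig: UInt, sigWidth: Int): UInt = {
    require(fullSig.getWidth == 2 * sigWidth)
    val kept   = fullSig >> (sigWidth - 2)     // top sigWidth + 2 bits survive
    val sticky = fullSig(sigWidth - 3, 0).orR  // OR of the low sigWidth - 2 bits
    Cat(kept, sticky)
  }
}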
module MulRawFN_20( // @[MulRecFN.scala:75:7] input io_a_isNaN, // @[MulRecFN.scala:77:16] input io_a_isInf, // @[MulRecFN.scala:77:16] input io_a_isZero, // @[MulRecFN.scala:77:16] input io_a_sign, // @[MulRecFN.scala:77:16] input [9:0] io_a_sExp, // @[MulRecFN.scala:77:16] input [24:0] io_a_sig, // @[MulRecFN.scala:77:16] input io_b_isNaN, // @[MulRecFN.scala:77:16] input io_b_isInf, // @[MulRecFN.scala:77:16] input io_b_isZero, // @[MulRecFN.scala:77:16] input io_b_sign, // @[MulRecFN.scala:77:16] input [9:0] io_b_sExp, // @[MulRecFN.scala:77:16] input [24:0] io_b_sig, // @[MulRecFN.scala:77:16] output io_invalidExc, // @[MulRecFN.scala:77:16] output io_rawOut_isNaN, // @[MulRecFN.scala:77:16] output io_rawOut_isInf, // @[MulRecFN.scala:77:16] output io_rawOut_isZero, // @[MulRecFN.scala:77:16] output io_rawOut_sign, // @[MulRecFN.scala:77:16] output [9:0] io_rawOut_sExp, // @[MulRecFN.scala:77:16] output [26:0] io_rawOut_sig // @[MulRecFN.scala:77:16] ); wire [47:0] _mulFullRaw_io_rawOut_sig; // @[MulRecFN.scala:84:28] wire io_a_isNaN_0 = io_a_isNaN; // @[MulRecFN.scala:75:7] wire io_a_isInf_0 = io_a_isInf; // @[MulRecFN.scala:75:7] wire io_a_isZero_0 = io_a_isZero; // @[MulRecFN.scala:75:7] wire io_a_sign_0 = io_a_sign; // @[MulRecFN.scala:75:7] wire [9:0] io_a_sExp_0 = io_a_sExp; // @[MulRecFN.scala:75:7] wire [24:0] io_a_sig_0 = io_a_sig; // @[MulRecFN.scala:75:7] wire io_b_isNaN_0 = io_b_isNaN; // @[MulRecFN.scala:75:7] wire io_b_isInf_0 = io_b_isInf; // @[MulRecFN.scala:75:7] wire io_b_isZero_0 = io_b_isZero; // @[MulRecFN.scala:75:7] wire io_b_sign_0 = io_b_sign; // @[MulRecFN.scala:75:7] wire [9:0] io_b_sExp_0 = io_b_sExp; // @[MulRecFN.scala:75:7] wire [24:0] io_b_sig_0 = io_b_sig; // @[MulRecFN.scala:75:7] wire [26:0] _io_rawOut_sig_T_3; // @[MulRecFN.scala:93:10] wire io_rawOut_isNaN_0; // @[MulRecFN.scala:75:7] wire io_rawOut_isInf_0; // @[MulRecFN.scala:75:7] wire io_rawOut_isZero_0; // @[MulRecFN.scala:75:7] wire io_rawOut_sign_0; // @[MulRecFN.scala:75:7] wire [9:0] io_rawOut_sExp_0; // @[MulRecFN.scala:75:7] wire [26:0] io_rawOut_sig_0; // @[MulRecFN.scala:75:7] wire io_invalidExc_0; // @[MulRecFN.scala:75:7] wire [25:0] _io_rawOut_sig_T = _mulFullRaw_io_rawOut_sig[47:22]; // @[MulRecFN.scala:84:28, :93:15] wire [21:0] _io_rawOut_sig_T_1 = _mulFullRaw_io_rawOut_sig[21:0]; // @[MulRecFN.scala:84:28, :93:37] wire _io_rawOut_sig_T_2 = |_io_rawOut_sig_T_1; // @[MulRecFN.scala:93:{37,55}] assign _io_rawOut_sig_T_3 = {_io_rawOut_sig_T, _io_rawOut_sig_T_2}; // @[MulRecFN.scala:93:{10,15,55}] assign io_rawOut_sig_0 = _io_rawOut_sig_T_3; // @[MulRecFN.scala:75:7, :93:10] MulFullRawFN_20 mulFullRaw ( // @[MulRecFN.scala:84:28] .io_a_isNaN (io_a_isNaN_0), // @[MulRecFN.scala:75:7] .io_a_isInf (io_a_isInf_0), // @[MulRecFN.scala:75:7] .io_a_isZero (io_a_isZero_0), // @[MulRecFN.scala:75:7] .io_a_sign (io_a_sign_0), // @[MulRecFN.scala:75:7] .io_a_sExp (io_a_sExp_0), // @[MulRecFN.scala:75:7] .io_a_sig (io_a_sig_0), // @[MulRecFN.scala:75:7] .io_b_isNaN (io_b_isNaN_0), // @[MulRecFN.scala:75:7] .io_b_isInf (io_b_isInf_0), // @[MulRecFN.scala:75:7] .io_b_isZero (io_b_isZero_0), // @[MulRecFN.scala:75:7] .io_b_sign (io_b_sign_0), // @[MulRecFN.scala:75:7] .io_b_sExp (io_b_sExp_0), // @[MulRecFN.scala:75:7] .io_b_sig (io_b_sig_0), // @[MulRecFN.scala:75:7] .io_invalidExc (io_invalidExc_0), .io_rawOut_isNaN (io_rawOut_isNaN_0), .io_rawOut_isInf (io_rawOut_isInf_0), .io_rawOut_isZero (io_rawOut_isZero_0), .io_rawOut_sign (io_rawOut_sign_0), .io_rawOut_sExp (io_rawOut_sExp_0), 
.io_rawOut_sig (_mulFullRaw_io_rawOut_sig) ); // @[MulRecFN.scala:84:28] assign io_invalidExc = io_invalidExc_0; // @[MulRecFN.scala:75:7] assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulRecFN.scala:75:7] assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulRecFN.scala:75:7] assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulRecFN.scala:75:7] assign io_rawOut_sign = io_rawOut_sign_0; // @[MulRecFN.scala:75:7] assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulRecFN.scala:75:7] assign io_rawOut_sig = io_rawOut_sig_0; // @[MulRecFN.scala:75:7] endmodule
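Editorial aside (not one of the files above): the [24:0] significand ports indicate sigWidth = 24, i.e. the single-precision configuration MulRecFN(8, 24) that instantiates this MulRawFN. A hedged usage sketch, with the hypothetical wrapper name ExampleFP32Mul, showing one way such an instance is wired between IEEE-754 bit patterns using the recFNFromFN / fNFromRecFN converters that appear elsewhere in this document:

import chisel3._
import chisel3.util._
import hardfloat._

class ExampleFP32Mul extends Module {
  val io = IO(new Bundle {
    val a     = Input(UInt(32.W))   // IEEE-754 single precision
    val b     = Input(UInt(32.W))
    val out   = Output(UInt(32.W))
    val flags = Output(UInt(5.W))   // NV, DZ, OF, UF, NX exception flags
  })
  val mul = Module(new MulRecFN(8, 24))
  mul.io.a := recFNFromFN(8, 24, io.a)   // convert to the recoded format
  mul.io.b := recFNFromFN(8, 24, io.b)
  mul.io.roundingMode   := consts.round_near_even
  mul.io.detectTininess := consts.tininess_afterRounding
  io.out   := fNFromRecFN(8, 24, mul.io.out)  // convert back to IEEE-754 bits
  io.flags := mul.io.exceptionFlags
}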
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
module PE_370( // @[PE.scala:31:7] input clock, // @[PE.scala:31:7] input reset, // @[PE.scala:31:7] input [7:0] io_in_a, // @[PE.scala:35:14] input [19:0] io_in_b, // @[PE.scala:35:14] input [19:0] io_in_d, // @[PE.scala:35:14] output [7:0] io_out_a, // @[PE.scala:35:14] output [19:0] io_out_b, // @[PE.scala:35:14] output [19:0] io_out_c, // @[PE.scala:35:14] input io_in_control_dataflow, // @[PE.scala:35:14] input io_in_control_propagate, // @[PE.scala:35:14] input [4:0] io_in_control_shift, // @[PE.scala:35:14] output io_out_control_dataflow, // @[PE.scala:35:14] output io_out_control_propagate, // @[PE.scala:35:14] output [4:0] io_out_control_shift, // @[PE.scala:35:14] input [2:0] io_in_id, // @[PE.scala:35:14] output [2:0] io_out_id, // @[PE.scala:35:14] input io_in_last, // @[PE.scala:35:14] output io_out_last, // @[PE.scala:35:14] input io_in_valid, // @[PE.scala:35:14] output io_out_valid // @[PE.scala:35:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7] wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7] wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7] wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7] wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7] wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7] wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7] wire io_in_last_0 = io_in_last; // @[PE.scala:31:7] wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7] wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7] wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60] wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60] wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7] wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37] wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37] wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35] wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7] wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7] wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7] wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7] wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7] wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7] wire [19:0] io_out_b_0; // @[PE.scala:31:7] wire [19:0] io_out_c_0; // @[PE.scala:31:7] reg [7:0] c1; // @[PE.scala:70:15] wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15] wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38] reg [7:0] c2; // @[PE.scala:71:15] wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15] wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38] reg last_s; // @[PE.scala:89:25] wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21] wire [4:0] shift_offset = flip ? 
io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25] wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25] wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32] wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32] wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25] wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15] wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}] wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25] wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27] wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27] wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 
32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25] wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15] wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15] assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37] wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37] wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}] wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15] wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}] wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 
32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15] wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15] assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37] wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37] wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}] wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38] wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16] wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38] wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35] wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35] always @(posedge clock) begin // @[PE.scala:31:7] if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8] c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15] if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8] c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15] if (io_in_valid_0) // @[PE.scala:31:7] last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25] always @(posedge) MacUnit_114 mac_unit ( // @[PE.scala:64:24] .clock (clock), .reset (reset), .io_in_a (io_in_a_0), // @[PE.scala:31:7] .io_in_b (io_in_control_propagate_0 ? 
_mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}] .io_in_c (io_in_b_0), // @[PE.scala:31:7] .io_out_d (io_out_b_0) ); // @[PE.scala:64:24] assign io_out_a = io_out_a_0; // @[PE.scala:31:7] assign io_out_b = io_out_b_0; // @[PE.scala:31:7] assign io_out_c = io_out_c_0; // @[PE.scala:31:7] assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7] assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7] assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7] assign io_out_id = io_out_id_0; // @[PE.scala:31:7] assign io_out_last = io_out_last_0; // @[PE.scala:31:7] assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7] endmodule
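The point_five / zeros / ones_digit wires in the PE_370 Verilog above come from SIntArithmetic.>> in Arithmetic.scala, the rounding right-shift applied when an accumulator register is shifted out by shift_offset. Below is a minimal pure-Scala sketch of that rounding equation; it is not taken from the gemmini sources, and the function name and example values are made up for illustration.

def roundingShift(x: Int, u: Int): Int = {
  if (u == 0) x
  else {
    val shifted   = x >> u                            // (self >> u).asSInt
    val pointFive = (x >> (u - 1)) & 1                // bit just below the cut point
    val zeros     = (x & ((1 << (u - 1)) - 1)) != 0   // any lower dropped bits set?
    val onesDigit = shifted & 1                       // self(u): LSB of the truncated result
    val r         = pointFive == 1 && (zeros || onesDigit == 1)
    shifted + (if (r) 1 else 0)
  }
}

// 11 >> 2 drops 0b11 (more than half), so 2 rounds up to 3; 10 >> 2 is an exact tie and stays at the even value 2.
assert(roundingShift(11, 2) == 3 && roundingShift(10, 2) == 2)

The same expression is instantiated twice in PE_370 (io_out_c_zeros vs io_out_c_zeros_1) because either c1 or c2 can be the register being shifted out, depending on the propagate flag.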
Generate the Verilog code corresponding to the following Chisel files. File INToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import consts._ class INToRecFN(intWidth: Int, expWidth: Int, sigWidth: Int) extends RawModule { override def desiredName = s"INToRecFN_i${intWidth}_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val signedIn = Input(Bool()) val in = Input(Bits(intWidth.W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val intAsRawFloat = rawFloatFromIN(io.signedIn, io.in); val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( intAsRawFloat.expWidth, intWidth, expWidth, sigWidth, flRoundOpt_sigMSBitAlwaysZero | flRoundOpt_neverUnderflows )) roundAnyRawFNToRecFN.io.invalidExc := false.B roundAnyRawFNToRecFN.io.infiniteExc := false.B roundAnyRawFNToRecFN.io.in := intAsRawFloat roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags } File primitives.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). 
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object lowMask { def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt = { require(topBound != bottomBound) val numInVals = BigInt(1)<<in.getWidth if (topBound < bottomBound) { lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound) } else if (numInVals > 64 /* Empirical */) { // For simulation performance, we should avoid generating // exteremely wide shifters, so we divide and conquer. // Empirically, this does not impact synthesis QoR. 
val mid = numInVals / 2 val msb = in(in.getWidth - 1) val lsbs = in(in.getWidth - 2, 0) if (mid < topBound) { if (mid <= bottomBound) { Mux(msb, lowMask(lsbs, topBound - mid, bottomBound - mid), 0.U ) } else { Mux(msb, lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U, lowMask(lsbs, mid, bottomBound) ) } } else { ~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound)) } } else { val shift = (BigInt(-1)<<numInVals.toInt).S>>in Reverse( shift( (numInVals - 1 - bottomBound).toInt, (numInVals - topBound).toInt ) ) } } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object countLeadingZeros { def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy2 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 1)>>1 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 2).orR reducedVec.asUInt } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy4 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 3)>>2 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 4).orR reducedVec.asUInt } } File rawFloatFromIN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ object rawFloatFromIN { def apply(signedIn: Bool, in: Bits): RawFloat = { val expWidth = log2Up(in.getWidth) + 1 //*** CHANGE THIS; CAN BE VERY LARGE: val extIntWidth = 1<<(expWidth - 1) val sign = signedIn && in(in.getWidth - 1) val absIn = Mux(sign, -in.asUInt, in.asUInt) val extAbsIn = (0.U(extIntWidth.W) ## absIn)(extIntWidth - 1, 0) val adjustedNormDist = countLeadingZeros(extAbsIn) val sig = (extAbsIn<<adjustedNormDist)( extIntWidth - 1, extIntWidth - in.getWidth) val out = Wire(new RawFloat(expWidth, in.getWidth)) out.isNaN := false.B out.isInf := false.B out.isZero := ! sig(in.getWidth - 1) out.sign := sign out.sExp := (2.U(2.W) ## ~adjustedNormDist(expWidth - 2, 0)).zext out.sig := sig out } }
module INToRecFN_i1_e8_s24_38(); // @[INToRecFN.scala:43:7] wire [1:0] _intAsRawFloat_absIn_T = 2'h3; // @[rawFloatFromIN.scala:52:31] wire [2:0] _intAsRawFloat_extAbsIn_T = 3'h1; // @[rawFloatFromIN.scala:53:44] wire [2:0] _intAsRawFloat_sig_T = 3'h2; // @[rawFloatFromIN.scala:56:22] wire [2:0] _intAsRawFloat_out_sExp_T_2 = 3'h4; // @[rawFloatFromIN.scala:64:33] wire [3:0] intAsRawFloat_sExp = 4'h4; // @[rawFloatFromIN.scala:59:23, :64:72] wire [3:0] _intAsRawFloat_out_sExp_T_3 = 4'h4; // @[rawFloatFromIN.scala:59:23, :64:72] wire [1:0] intAsRawFloat_extAbsIn = 2'h1; // @[rawFloatFromIN.scala:53:53, :59:23, :65:20] wire [1:0] intAsRawFloat_sig = 2'h1; // @[rawFloatFromIN.scala:53:53, :59:23, :65:20] wire [4:0] io_exceptionFlags = 5'h0; // @[INToRecFN.scala:43:7, :46:16, :60:15] wire [32:0] io_out = 33'h80000000; // @[INToRecFN.scala:43:7, :46:16, :60:15] wire [2:0] io_roundingMode = 3'h0; // @[INToRecFN.scala:43:7, :46:16, :60:15] wire io_in = 1'h1; // @[Mux.scala:50:70] wire io_detectTininess = 1'h1; // @[Mux.scala:50:70] wire _intAsRawFloat_sign_T = 1'h1; // @[Mux.scala:50:70] wire _intAsRawFloat_absIn_T_1 = 1'h1; // @[Mux.scala:50:70] wire intAsRawFloat_absIn = 1'h1; // @[Mux.scala:50:70] wire _intAsRawFloat_adjustedNormDist_T = 1'h1; // @[Mux.scala:50:70] wire intAsRawFloat_adjustedNormDist = 1'h1; // @[Mux.scala:50:70] wire intAsRawFloat_sig_0 = 1'h1; // @[Mux.scala:50:70] wire _intAsRawFloat_out_isZero_T = 1'h1; // @[Mux.scala:50:70] wire _intAsRawFloat_out_sExp_T = 1'h1; // @[Mux.scala:50:70] wire io_signedIn = 1'h0; // @[INToRecFN.scala:43:7] wire intAsRawFloat_sign = 1'h0; // @[rawFloatFromIN.scala:51:29] wire _intAsRawFloat_adjustedNormDist_T_1 = 1'h0; // @[primitives.scala:91:52] wire intAsRawFloat_isNaN = 1'h0; // @[rawFloatFromIN.scala:59:23] wire intAsRawFloat_isInf = 1'h0; // @[rawFloatFromIN.scala:59:23] wire intAsRawFloat_isZero = 1'h0; // @[rawFloatFromIN.scala:59:23] wire intAsRawFloat_sign_0 = 1'h0; // @[rawFloatFromIN.scala:59:23] wire _intAsRawFloat_out_isZero_T_1 = 1'h0; // @[rawFloatFromIN.scala:62:23] wire _intAsRawFloat_out_sExp_T_1 = 1'h0; // @[rawFloatFromIN.scala:64:36] RoundAnyRawFNToRecFN_ie2_is1_oe8_os24_38 roundAnyRawFNToRecFN (); // @[INToRecFN.scala:60:15] endmodule
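The constants in INToRecFN_i1_e8_s24_38 fall out of rawFloatFromIN normalizing its 1-bit input, which in this instance is tied to 1, so every intermediate wire reduces to a constant. The sketch below is a pure-Scala illustration of that normalization step (leading-zero count followed by an MSB-aligning shift), not taken from the hardfloat sources, and simplified to the case where the input width is a power of two so the extended width equals it.

def countLeadingZeros(bits: Seq[Boolean]): Int =        // mirrors PriorityEncoder(in.asBools.reverse)
  bits.reverse.indexWhere(identity) match {
    case -1 => bits.length                              // all-zero input; the hardware flags isZero instead
    case n  => n
  }

def normalize(absIn: Int, width: Int): (Int, Int) = {
  val bits = (0 until width).map(i => ((absIn >> i) & 1) == 1) // LSB-first, like .asBools
  val dist = countLeadingZeros(bits)
  val sig  = (absIn << dist) & ((1 << width) - 1)              // MSB-aligned significand
  (dist, sig)
}

// Example: absIn = 3 (0b0011) in a 4-bit field gives adjustedNormDist = 2 and sig = 0b1100.
assert(normalize(3, 4) == (2, 0xC))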
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag }
module OptimizationBarrier_TLBEntryData_183( // @[package.scala:267:30] input clock, // @[package.scala:267:30] input reset, // @[package.scala:267:30] input [19:0] io_x_ppn, // @[package.scala:268:18] input io_x_u, // @[package.scala:268:18] input io_x_g, // @[package.scala:268:18] input io_x_ae_ptw, // @[package.scala:268:18] input io_x_ae_final, // @[package.scala:268:18] input io_x_ae_stage2, // @[package.scala:268:18] input io_x_pf, // @[package.scala:268:18] input io_x_gf, // @[package.scala:268:18] input io_x_sw, // @[package.scala:268:18] input io_x_sx, // @[package.scala:268:18] input io_x_sr, // @[package.scala:268:18] input io_x_hw, // @[package.scala:268:18] input io_x_hx, // @[package.scala:268:18] input io_x_hr, // @[package.scala:268:18] input io_x_pw, // @[package.scala:268:18] input io_x_px, // @[package.scala:268:18] input io_x_pr, // @[package.scala:268:18] input io_x_ppp, // @[package.scala:268:18] input io_x_pal, // @[package.scala:268:18] input io_x_paa, // @[package.scala:268:18] input io_x_eff, // @[package.scala:268:18] input io_x_c, // @[package.scala:268:18] input io_x_fragmented_superpage, // @[package.scala:268:18] output io_y_u, // @[package.scala:268:18] output io_y_ae_ptw, // @[package.scala:268:18] output io_y_ae_final, // @[package.scala:268:18] output io_y_ae_stage2, // @[package.scala:268:18] output io_y_pf, // @[package.scala:268:18] output io_y_gf, // @[package.scala:268:18] output io_y_sw, // @[package.scala:268:18] output io_y_sx, // @[package.scala:268:18] output io_y_sr, // @[package.scala:268:18] output io_y_hw, // @[package.scala:268:18] output io_y_hx, // @[package.scala:268:18] output io_y_hr, // @[package.scala:268:18] output io_y_pw, // @[package.scala:268:18] output io_y_px, // @[package.scala:268:18] output io_y_pr, // @[package.scala:268:18] output io_y_ppp, // @[package.scala:268:18] output io_y_pal, // @[package.scala:268:18] output io_y_paa, // @[package.scala:268:18] output io_y_eff, // @[package.scala:268:18] output io_y_c // @[package.scala:268:18] ); wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30] wire io_x_u_0 = io_x_u; // @[package.scala:267:30] wire io_x_g_0 = io_x_g; // @[package.scala:267:30] wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30] wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30] wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30] wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30] wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30] wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30] wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30] wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30] wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30] wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30] wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30] wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30] wire io_x_px_0 = io_x_px; // @[package.scala:267:30] wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30] wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30] wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30] wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30] wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30] wire io_x_c_0 = io_x_c; // @[package.scala:267:30] wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30] wire [19:0] io_y_ppn = io_x_ppn_0; // @[package.scala:267:30] wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30] wire io_y_g = io_x_g_0; // @[package.scala:267:30] wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // 
@[package.scala:267:30] wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30] wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30] wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30] wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30] wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30] wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30] wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30] wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30] wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30] wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30] wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30] wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30] wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30] wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30] wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30] wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30] wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30] wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30] wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30] assign io_y_u = io_y_u_0; // @[package.scala:267:30] assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30] assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30] assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30] assign io_y_pf = io_y_pf_0; // @[package.scala:267:30] assign io_y_gf = io_y_gf_0; // @[package.scala:267:30] assign io_y_sw = io_y_sw_0; // @[package.scala:267:30] assign io_y_sx = io_y_sx_0; // @[package.scala:267:30] assign io_y_sr = io_y_sr_0; // @[package.scala:267:30] assign io_y_hw = io_y_hw_0; // @[package.scala:267:30] assign io_y_hx = io_y_hx_0; // @[package.scala:267:30] assign io_y_hr = io_y_hr_0; // @[package.scala:267:30] assign io_y_pw = io_y_pw_0; // @[package.scala:267:30] assign io_y_px = io_y_px_0; // @[package.scala:267:30] assign io_y_pr = io_y_pr_0; // @[package.scala:267:30] assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30] assign io_y_pal = io_y_pal_0; // @[package.scala:267:30] assign io_y_paa = io_y_paa_0; // @[package.scala:267:30] assign io_y_eff = io_y_eff_0; // @[package.scala:267:30] assign io_y_c = io_y_c_0; // @[package.scala:267:30] endmodule
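OptimizationBarrier_TLBEntryData_183 is the elaborated form of the OptimizationBarrier helper from package.scala above: a Module whose only behavior is io.y := io.x, wrapped in its own instance so synthesis cannot fold the value into surrounding logic. Unused fields of the bundle (for example io_y_ppn) survive only as internal wires, presumably because the enclosing module never reads them. Below is a minimal usage sketch with a made-up 8-bit payload, assuming only the helper as defined above and a rocket-chip dependency on the classpath.

import chisel3._
import freechips.rocketchip.util._

class BarrierExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  // Functionally just io.out := io.in, but the extra module boundary keeps the
  // wrapped value (and its name) from being optimized across.
  io.out := OptimizationBarrier(io.in)
}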
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
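Aside: the rounding right-shift (>>) defined for UInt and SInt above follows the fixed-point rounding equation referenced in its comment. Below is a minimal software model of the same equation, useful for checking expected values; the helper name roundingShift is illustrative only and is not part of the gemmini sources.

def roundingShift(self: Int, u: Int): Int = {
  // point_five: the bit just below the cut, i.e. the 0.5 position
  val pointFive = if (u == 0) 0 else (self >> (u - 1)) & 1
  // zeros: OR-reduction of all bits below the 0.5 position
  val zeros     = if (u <= 1) 0 else if ((self & ((1 << (u - 1)) - 1)) != 0) 1 else 0
  // ones_digit: the LSB of the truncated result
  val onesDigit = (self >> u) & 1
  val r = pointFive & (zeros | onesDigit)
  (self >> u) + r
}

For example, roundingShift(6, 2) gives (6 >> 2) + 1 = 2, while roundingShift(2, 2) gives (2 >> 2) + 0 = 0, matching the point_five / zeros / ones_digit terms computed in the hardware version.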
module PE_427( // @[PE.scala:31:7] input clock, // @[PE.scala:31:7] input reset, // @[PE.scala:31:7] input [7:0] io_in_a, // @[PE.scala:35:14] input [19:0] io_in_b, // @[PE.scala:35:14] input [19:0] io_in_d, // @[PE.scala:35:14] output [7:0] io_out_a, // @[PE.scala:35:14] output [19:0] io_out_b, // @[PE.scala:35:14] output [19:0] io_out_c, // @[PE.scala:35:14] input io_in_control_dataflow, // @[PE.scala:35:14] input io_in_control_propagate, // @[PE.scala:35:14] input [4:0] io_in_control_shift, // @[PE.scala:35:14] output io_out_control_dataflow, // @[PE.scala:35:14] output io_out_control_propagate, // @[PE.scala:35:14] output [4:0] io_out_control_shift, // @[PE.scala:35:14] input [2:0] io_in_id, // @[PE.scala:35:14] output [2:0] io_out_id, // @[PE.scala:35:14] input io_in_last, // @[PE.scala:35:14] output io_out_last, // @[PE.scala:35:14] input io_in_valid, // @[PE.scala:35:14] output io_out_valid // @[PE.scala:35:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7] wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7] wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7] wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7] wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7] wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7] wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7] wire io_in_last_0 = io_in_last; // @[PE.scala:31:7] wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7] wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7] wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60] wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60] wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7] wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37] wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37] wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35] wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7] wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7] wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7] wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7] wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7] wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7] wire [19:0] io_out_b_0; // @[PE.scala:31:7] wire [19:0] io_out_c_0; // @[PE.scala:31:7] reg [7:0] c1; // @[PE.scala:70:15] wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15] wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38] reg [7:0] c2; // @[PE.scala:71:15] wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15] wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38] reg last_s; // @[PE.scala:89:25] wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21] wire [4:0] shift_offset = flip ? 
io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25] wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25] wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32] wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32] wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25] wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15] wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}] wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25] wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27] wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27] wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 
32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25] wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15] wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15] assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37] wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37] wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}] wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15] wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}] wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 
32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15] wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15] assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37] wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37] wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}] wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38] wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16] wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38] wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35] wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35] always @(posedge clock) begin // @[PE.scala:31:7] if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8] c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15] if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8] c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15] if (io_in_valid_0) // @[PE.scala:31:7] last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25] always @(posedge) MacUnit_171 mac_unit ( // @[PE.scala:64:24] .clock (clock), .reset (reset), .io_in_a (io_in_a_0), // @[PE.scala:31:7] .io_in_b (io_in_control_propagate_0 ? 
_mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}] .io_in_c (io_in_b_0), // @[PE.scala:31:7] .io_out_d (io_out_b_0) ); // @[PE.scala:64:24] assign io_out_a = io_out_a_0; // @[PE.scala:31:7] assign io_out_b = io_out_b_0; // @[PE.scala:31:7] assign io_out_c = io_out_c_0; // @[PE.scala:31:7] assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7] assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7] assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7] assign io_out_id = io_out_id_0; // @[PE.scala:31:7] assign io_out_last = io_out_last_0; // @[PE.scala:31:7] assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7] endmodule
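Aside: PE_427 above is one elaboration of the PE generator in PE.scala. A minimal harness that elaborates a PE with matching port widths might look as follows; the concrete parameters (8-bit inputs, 20-bit outputs, a 32-bit accumulator type, weight-stationary dataflow, up to 8 in-flight matmuls) are inferred from the port widths and are assumptions rather than the exact configuration used to produce PE_427.

import chisel3._
import gemmini._

class PE427Harness extends Module {
  val io = IO(new Bundle {
    val out_c = Output(SInt(20.W))
  })
  // Arithmetic[SInt] is resolved from the implicit SIntArithmetic instance
  // in the Arithmetic companion object above.
  val pe = Module(new PE(SInt(8.W), SInt(20.W), SInt(32.W),
    Dataflow.WS, max_simultaneous_matmuls = 8))
  pe.io.in_a := 0.S
  pe.io.in_b := 0.S
  pe.io.in_d := 0.S
  pe.io.in_control.dataflow  := Dataflow.WS.id.U
  pe.io.in_control.propagate := 0.U
  pe.io.in_control.shift     := 0.U
  pe.io.in_id    := 0.U
  pe.io.in_last  := false.B
  pe.io.in_valid := true.B
  io.out_c := pe.io.out_c
}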
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag }
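Aside: OptimizationBarrier, defined in the package object above, wraps any Data in a pass-through module (io.y := io.x) so that synthesis tools cannot optimize across the boundary; modules such as OptimizationBarrier_TLBEntryData_307 below are elaborations of exactly that wrapper. A minimal usage sketch follows; the surrounding module and signal names are illustrative.

import chisel3._
import freechips.rocketchip.util.OptimizationBarrier

class BarrierExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  // The barrier elaborates into its own module whose outputs simply mirror
  // its inputs, producing the wire/assign pass-through chains seen in the
  // generated OptimizationBarrier_* Verilog.
  io.out := OptimizationBarrier(io.in)
}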
module OptimizationBarrier_TLBEntryData_307( // @[package.scala:267:30] input clock, // @[package.scala:267:30] input reset, // @[package.scala:267:30] input [19:0] io_x_ppn, // @[package.scala:268:18] input io_x_u, // @[package.scala:268:18] input io_x_g, // @[package.scala:268:18] input io_x_ae_ptw, // @[package.scala:268:18] input io_x_ae_final, // @[package.scala:268:18] input io_x_ae_stage2, // @[package.scala:268:18] input io_x_pf, // @[package.scala:268:18] input io_x_gf, // @[package.scala:268:18] input io_x_sw, // @[package.scala:268:18] input io_x_sx, // @[package.scala:268:18] input io_x_sr, // @[package.scala:268:18] input io_x_hw, // @[package.scala:268:18] input io_x_hx, // @[package.scala:268:18] input io_x_hr, // @[package.scala:268:18] input io_x_pw, // @[package.scala:268:18] input io_x_px, // @[package.scala:268:18] input io_x_pr, // @[package.scala:268:18] input io_x_ppp, // @[package.scala:268:18] input io_x_pal, // @[package.scala:268:18] input io_x_paa, // @[package.scala:268:18] input io_x_eff, // @[package.scala:268:18] input io_x_c, // @[package.scala:268:18] input io_x_fragmented_superpage, // @[package.scala:268:18] output [19:0] io_y_ppn, // @[package.scala:268:18] output io_y_u, // @[package.scala:268:18] output io_y_ae_ptw, // @[package.scala:268:18] output io_y_ae_final, // @[package.scala:268:18] output io_y_ae_stage2, // @[package.scala:268:18] output io_y_pf, // @[package.scala:268:18] output io_y_gf, // @[package.scala:268:18] output io_y_sw, // @[package.scala:268:18] output io_y_sx, // @[package.scala:268:18] output io_y_sr, // @[package.scala:268:18] output io_y_hw, // @[package.scala:268:18] output io_y_hx, // @[package.scala:268:18] output io_y_hr // @[package.scala:268:18] ); wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30] wire io_x_u_0 = io_x_u; // @[package.scala:267:30] wire io_x_g_0 = io_x_g; // @[package.scala:267:30] wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30] wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30] wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30] wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30] wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30] wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30] wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30] wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30] wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30] wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30] wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30] wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30] wire io_x_px_0 = io_x_px; // @[package.scala:267:30] wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30] wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30] wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30] wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30] wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30] wire io_x_c_0 = io_x_c; // @[package.scala:267:30] wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30] wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30] wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30] wire io_y_g = io_x_g_0; // @[package.scala:267:30] wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30] wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30] wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30] wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30] wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30] wire io_y_sw_0 = io_x_sw_0; // 
@[package.scala:267:30] wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30] wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30] wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30] wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30] wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30] wire io_y_pw = io_x_pw_0; // @[package.scala:267:30] wire io_y_px = io_x_px_0; // @[package.scala:267:30] wire io_y_pr = io_x_pr_0; // @[package.scala:267:30] wire io_y_ppp = io_x_ppp_0; // @[package.scala:267:30] wire io_y_pal = io_x_pal_0; // @[package.scala:267:30] wire io_y_paa = io_x_paa_0; // @[package.scala:267:30] wire io_y_eff = io_x_eff_0; // @[package.scala:267:30] wire io_y_c = io_x_c_0; // @[package.scala:267:30] wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30] assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30] assign io_y_u = io_y_u_0; // @[package.scala:267:30] assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30] assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30] assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30] assign io_y_pf = io_y_pf_0; // @[package.scala:267:30] assign io_y_gf = io_y_gf_0; // @[package.scala:267:30] assign io_y_sw = io_y_sw_0; // @[package.scala:267:30] assign io_y_sx = io_y_sx_0; // @[package.scala:267:30] assign io_y_sr = io_y_sr_0; // @[package.scala:267:30] assign io_y_hw = io_y_hw_0; // @[package.scala:267:30] assign io_y_hx = io_y_hx_0; // @[package.scala:267:30] assign io_y_hr = io_y_hr_0; // @[package.scala:267:30] endmodule
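Aside: two other helpers from the same package object, holdUnless and readAndHold, are commonly combined to keep a SyncReadMem read result stable after the read enable drops. A minimal sketch, assuming an illustrative 64-entry, 32-bit memory:

import chisel3._
import freechips.rocketchip.util._

class HoldExample extends Module {
  val io = IO(new Bundle {
    val addr = Input(UInt(6.W))
    val en   = Input(Bool())
    val data = Output(UInt(32.W))
  })
  val mem = SyncReadMem(64, UInt(32.W))
  // readAndHold performs the synchronous read while io.en is high, then
  // holdUnless (a Mux around RegEnable) keeps the last read value once
  // RegNext(io.en) deasserts.
  io.data := mem.readAndHold(io.addr, io.en)
}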
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
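// Illustrative sketch of the scoreboard these assignments maintain (the numbers below are
// example values, not anything required by the code): `inflight` keeps one bit per source ID,
// while `inflight_opcodes` and `inflight_sizes` pack one small field per source at bit offset
// (source << log_a_opcode_bus_size) and (source << log_a_size_bus_size). Each field stores
// (value << 1) | 1, so an all-zero field means "nothing outstanding". With the 3-bit TileLink
// opcode, a_opcode_bus_size = 4 and log_a_opcode_bus_size = 2, so a Get (opcode 4) accepted on
// source 2 sets inflight(2) and writes (4 << 1) | 1 = 9 into bits [11:8] of inflight_opcodes;
// a_opcode_lookup recovers the opcode by masking that field and shifting right by one. The
// first beat of a non-ReleaseAck D response for the same source clears the field again via
// d_clr, d_opcodes_clr, and d_sizes_clr.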
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
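// Illustrative note on the variable-amount rotate defined here: the rotate amount is padded to
// log2Ceil(width) bits and stage i of the foldLeft applies rotateRight(1 << i) only when amt(i)
// is set, i.e. a log-depth barrel rotator built from the constant-amount rotateRight above.
// For example (widths chosen for illustration), an 8-bit value rotated by n = 5 (binary 101)
// goes through stages 0 and 2, composing rotateRight(1) and rotateRight(4) into a rotate by 5.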
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, 
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
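// Illustrative sketch of what unify computes (example sets chosen for illustration): it pairs
// up AddressSets whose bases differ in exactly one of the enumerated base bits and widens the
// mask by that bit. Seq(AddressSet(0x0, 0xff), AddressSet(0x100, 0xff)) yields bits = 0x100;
// both sets group under base 0x0, and since the group has two members the pair collapses to
// AddressSet(0x0, 0x1ff), covering 0x000-0x1ff in a single set.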
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
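Before the emitted Verilog below, a note on the beat-accounting idiom used throughout TLEdge: first/last/done are derived from numBeats1 plus a small down-counter. The following stand-alone Chisel sketch shows the same counting scheme in isolation; BeatTracker and maxBeats are illustrative names, not part of the TileLink edge API.

import chisel3._
import chisel3.util._

// Stand-alone illustration of the firstlastHelper counting scheme:
// beats1 is (number of beats - 1) and is only meaningful on the first beat.
class BeatTracker(maxBeats: Int) extends Module {
  val io = IO(new Bundle {
    val fire   = Input(Bool())                    // a beat is accepted this cycle
    val beats1 = Input(UInt(log2Up(maxBeats).W))  // total beats minus one
    val first  = Output(Bool())
    val last   = Output(Bool())
    val done   = Output(Bool())
  })
  val counter  = RegInit(0.U(log2Up(maxBeats).W))
  val counter1 = counter - 1.U
  io.first := counter === 0.U
  io.last  := counter === 1.U || io.beats1 === 0.U
  io.done  := io.last && io.fire
  when (io.fire) { counter := Mux(io.first, io.beats1, counter1) }
}

On the first accepted beat the counter loads beats1 and then counts down, so last covers both the single-beat case (beats1 === 0) and the final beat of a burst.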
module TLMonitor_55( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [15:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_b_ready, // @[Monitor.scala:20:14] input io_in_b_valid, // @[Monitor.scala:20:14] input [1:0] io_in_b_bits_param, // @[Monitor.scala:20:14] input [6:0] io_in_b_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_b_bits_address, // @[Monitor.scala:20:14] input io_in_c_ready, // @[Monitor.scala:20:14] input io_in_c_valid, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_c_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14] input io_in_c_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt, // @[Monitor.scala:20:14] input io_in_e_valid, // @[Monitor.scala:20:14] input [3:0] io_in_e_bits_sink // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire [12:0] _GEN_0 = {10'h0, io_in_c_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [1:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [6:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _d_first_T_3 = io_in_d_ready & io_in_d_valid; // @[Decoupled.scala:51:35] reg [1:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [6:0] source_1; // @[Monitor.scala:541:22] reg [3:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [1:0] b_first_counter; // @[Edges.scala:229:27] reg [1:0] param_2; // @[Monitor.scala:411:22] reg [6:0] source_2; // @[Monitor.scala:413:22] reg [31:0] address_1; // @[Monitor.scala:414:22] wire _c_first_T_1 = io_in_c_ready & io_in_c_valid; // @[Decoupled.scala:51:35] reg [1:0] c_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_3; // @[Monitor.scala:515:22] reg [2:0] param_3; // @[Monitor.scala:516:22] reg [2:0] size_3; // @[Monitor.scala:517:22] reg [6:0] source_3; // @[Monitor.scala:518:22] reg [31:0] address_2; // @[Monitor.scala:519:22] reg [78:0] inflight; // 
@[Monitor.scala:614:27] reg [315:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [315:0] inflight_sizes; // @[Monitor.scala:618:33] reg [1:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 2'h0; // @[Edges.scala:229:27, :231:25] reg [1:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 2'h0; // @[Edges.scala:229:27, :231:25] wire [127:0] _GEN_1 = {121'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_2 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_3 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [127:0] _GEN_4 = {121'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [78:0] inflight_1; // @[Monitor.scala:726:35] reg [315:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [1:0] c_first_counter_1; // @[Edges.scala:229:27] wire c_first_1 = c_first_counter_1 == 2'h0; // @[Edges.scala:229:27, :231:25] reg [1:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 2'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_5 = io_in_c_bits_opcode[2] & io_in_c_bits_opcode[1]; // @[Edges.scala:68:{36,40,51}] wire [127:0] _GEN_6 = {121'h0, io_in_c_bits_source}; // @[OneHot.scala:58:35] wire _GEN_7 = _c_first_T_1 & c_first_1 & _GEN_5; // @[Decoupled.scala:51:35] reg [31:0] watchdog_1; // @[Monitor.scala:818:27] reg [11:0] inflight_2; // @[Monitor.scala:828:27] reg [1:0] d_first_counter_3; // @[Edges.scala:229:27] wire d_first_3 = d_first_counter_3 == 2'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_8 = _d_first_T_3 & d_first_3 & io_in_d_bits_opcode[2] & ~(io_in_d_bits_opcode[1]); // @[Decoupled.scala:51:35] wire [15:0] _d_set_T = 16'h1 << io_in_d_bits_sink; // @[OneHot.scala:58:35] wire [11:0] d_set = _GEN_8 ? _d_set_T[11:0] : 12'h0; // @[OneHot.scala:58:35]
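The one-hot shifts above (_GEN_1, _GEN_4, _d_set_T) are the lowered form of Chisel's UIntToOH, which the monitor uses to set and clear per-source and per-sink in-flight bits. A minimal sketch of that bookkeeping idiom follows; InflightTracker, nSources, and the port names are assumptions for illustration, not the Monitor's real interface.

import chisel3._
import chisel3.util._

// Per-source in-flight bookkeeping: set a bit when a request fires,
// clear it when the matching response fires (UIntToOH lowers to the
// `1 << source` shifts visible in the Verilog above).
class InflightTracker(nSources: Int) extends Module {
  val io = IO(new Bundle {
    val reqFire  = Input(Bool())
    val reqSrc   = Input(UInt(log2Ceil(nSources).W))
    val respFire = Input(Bool())
    val respSrc  = Input(UInt(log2Ceil(nSources).W))
    val inflight = Output(UInt(nSources.W))
  })
  val busy = RegInit(0.U(nSources.W))
  val set  = Mux(io.reqFire,  UIntToOH(io.reqSrc,  nSources), 0.U)
  val clr  = Mux(io.respFire, UIntToOH(io.respSrc, nSources), 0.U)
  busy := (busy | set) & ~clr
  io.inflight := busy
}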
Generate the Verilog code corresponding to the following Chisel files. File HellaCache.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. package freechips.rocketchip.rocket import chisel3.{dontTouch, _} import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.bundlebridge._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.amba.AMBAProtField import freechips.rocketchip.diplomacy.{IdRange, TransferSizes, RegionType} import freechips.rocketchip.tile.{L1CacheParams, HasL1CacheParameters, HasCoreParameters, CoreBundle, HasNonDiplomaticTileParameters, BaseTile, HasTileParameters} import freechips.rocketchip.tilelink.{TLMasterParameters, TLClientNode, TLMasterPortParameters, TLEdgeOut, TLWidthWidget, TLFIFOFixer, ClientMetadata} import freechips.rocketchip.util.{Code, RandomReplacement, ParameterizedBundle} import freechips.rocketchip.util.{BooleanToAugmentedBoolean, IntToAugmentedInt} import scala.collection.mutable.ListBuffer case class DCacheParams( nSets: Int = 64, nWays: Int = 4, rowBits: Int = 64, subWordBits: Option[Int] = None, replacementPolicy: String = "random", nTLBSets: Int = 1, nTLBWays: Int = 32, nTLBBasePageSectors: Int = 4, nTLBSuperpages: Int = 4, tagECC: Option[String] = None, dataECC: Option[String] = None, dataECCBytes: Int = 1, nMSHRs: Int = 1, nSDQ: Int = 17, nRPQ: Int = 16, nMMIOs: Int = 1, blockBytes: Int = 64, separateUncachedResp: Boolean = false, acquireBeforeRelease: Boolean = false, pipelineWayMux: Boolean = false, clockGate: Boolean = false, scratch: Option[BigInt] = None) extends L1CacheParams { def tagCode: Code = Code.fromString(tagECC) def dataCode: Code = Code.fromString(dataECC) def dataScratchpadBytes: Int = scratch.map(_ => nSets*blockBytes).getOrElse(0) def replacement = new RandomReplacement(nWays) def silentDrop: Boolean = !acquireBeforeRelease require((!scratch.isDefined || nWays == 1), "Scratchpad only allowed in direct-mapped cache.") require((!scratch.isDefined || nMSHRs == 0), "Scratchpad only allowed in blocking cache.") if (scratch.isEmpty) require(isPow2(nSets), s"nSets($nSets) must be pow2") } trait HasL1HellaCacheParameters extends HasL1CacheParameters with HasCoreParameters { val cacheParams = tileParams.dcache.get val cfg = cacheParams def wordBits = coreDataBits def wordBytes = coreDataBytes def subWordBits = cacheParams.subWordBits.getOrElse(wordBits) def subWordBytes = subWordBits / 8 def wordOffBits = log2Up(wordBytes) def beatBytes = cacheBlockBytes / cacheDataBeats def beatWords = beatBytes / wordBytes def beatOffBits = log2Up(beatBytes) def idxMSB = untagBits-1 def idxLSB = blockOffBits def offsetmsb = idxLSB-1 def offsetlsb = wordOffBits def rowWords = rowBits/wordBits def doNarrowRead = coreDataBits * nWays % rowBits == 0 def eccBytes = cacheParams.dataECCBytes val eccBits = cacheParams.dataECCBytes * 8 val encBits = cacheParams.dataCode.width(eccBits) val encWordBits = encBits * (wordBits / eccBits) def encDataBits = cacheParams.dataCode.width(coreDataBits) // NBDCache only def encRowBits = encDataBits*rowWords def lrscCycles = coreParams.lrscCycles // ISA requires 16-insn LRSC sequences to succeed def lrscBackoff = 3 // disallow LRSC reacquisition briefly def blockProbeAfterGrantCycles = 8 // give the processor some time to issue a request after a grant def nIOMSHRs = cacheParams.nMMIOs def maxUncachedInFlight = cacheParams.nMMIOs def dataScratchpadSize = cacheParams.dataScratchpadBytes require(rowBits >= coreDataBits, 
s"rowBits($rowBits) < coreDataBits($coreDataBits)") if (!usingDataScratchpad) require(rowBits == cacheDataBits, s"rowBits($rowBits) != cacheDataBits($cacheDataBits)") // would need offset addr for puts if data width < xlen require(xLen <= cacheDataBits, s"xLen($xLen) > cacheDataBits($cacheDataBits)") } abstract class L1HellaCacheModule(implicit val p: Parameters) extends Module with HasL1HellaCacheParameters abstract class L1HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p) with HasL1HellaCacheParameters /** Bundle definitions for HellaCache interfaces */ trait HasCoreMemOp extends HasL1HellaCacheParameters { val addr = UInt(coreMaxAddrBits.W) val idx = (usingVM && untagBits > pgIdxBits).option(UInt(coreMaxAddrBits.W)) val tag = UInt((coreParams.dcacheReqTagBits + log2Ceil(dcacheArbPorts)).W) val cmd = UInt(M_SZ.W) val size = UInt(log2Ceil(coreDataBytes.log2 + 1).W) val signed = Bool() val dprv = UInt(PRV.SZ.W) val dv = Bool() } trait HasCoreData extends HasCoreParameters { val data = UInt(coreDataBits.W) val mask = UInt(coreDataBytes.W) } class HellaCacheReqInternal(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp { val phys = Bool() val no_resp = Bool() // The dcache may omit generating a response for this request val no_alloc = Bool() val no_xcpt = Bool() } class HellaCacheReq(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData class HellaCacheResp(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp with HasCoreData { val replay = Bool() val has_data = Bool() val data_word_bypass = UInt(coreDataBits.W) val data_raw = UInt(coreDataBits.W) val store_data = UInt(coreDataBits.W) } class AlignmentExceptions extends Bundle { val ld = Bool() val st = Bool() } class HellaCacheExceptions extends Bundle { val ma = new AlignmentExceptions val pf = new AlignmentExceptions val gf = new AlignmentExceptions val ae = new AlignmentExceptions } class HellaCacheWriteData(implicit p: Parameters) extends CoreBundle()(p) with HasCoreData class HellaCachePerfEvents extends Bundle { val acquire = Bool() val release = Bool() val grant = Bool() val tlbMiss = Bool() val blocked = Bool() val canAcceptStoreThenLoad = Bool() val canAcceptStoreThenRMW = Bool() val canAcceptLoadThenLoad = Bool() val storeBufferEmptyAfterLoad = Bool() val storeBufferEmptyAfterStore = Bool() } // interface between D$ and processor/DTLB class HellaCacheIO(implicit p: Parameters) extends CoreBundle()(p) { val req = Decoupled(new HellaCacheReq) val s1_kill = Output(Bool()) // kill previous cycle's req val s1_data = Output(new HellaCacheWriteData()) // data for previous cycle's req val s2_nack = Input(Bool()) // req from two cycles ago is rejected val s2_nack_cause_raw = Input(Bool()) // reason for nack is store-load RAW hazard (performance hint) val s2_kill = Output(Bool()) // kill req from two cycles ago val s2_uncached = Input(Bool()) // advisory signal that the access is MMIO val s2_paddr = Input(UInt(paddrBits.W)) // translated address val resp = Flipped(Valid(new HellaCacheResp)) val replay_next = Input(Bool()) val s2_xcpt = Input(new HellaCacheExceptions) val s2_gpa = Input(UInt(vaddrBitsExtended.W)) val s2_gpa_is_pte = Input(Bool()) val uncached_resp = tileParams.dcache.get.separateUncachedResp.option(Flipped(Decoupled(new HellaCacheResp))) val ordered = Input(Bool()) val store_pending = Input(Bool()) // there is a store in a store buffer somewhere val perf = Input(new HellaCachePerfEvents()) val keep_clock_enabled = Output(Bool()) // should D$ avoid 
clock-gating itself? val clock_enabled = Input(Bool()) // is D$ currently being clocked? } /** Base classes for Diplomatic TL2 HellaCaches */ abstract class HellaCache(tileId: Int)(implicit p: Parameters) extends LazyModule with HasNonDiplomaticTileParameters { protected val cfg = tileParams.dcache.get protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1( name = s"Core ${tileId} DCache", sourceId = IdRange(0, 1 max cfg.nMSHRs), supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes)))) protected def mmioClientParameters = Seq(TLMasterParameters.v1( name = s"Core ${tileId} DCache MMIO", sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs), requestFifo = true)) def firstMMIO = (cacheClientParameters.map(_.sourceId.end) :+ 0).max val node = TLClientNode(Seq(TLMasterPortParameters.v1( clients = cacheClientParameters ++ mmioClientParameters, minLatency = 1, requestFields = tileParams.core.useVM.option(Seq()).getOrElse(Seq(AMBAProtField()))))) val hartIdSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]()) val mmioAddressPrefixSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]()) val module: HellaCacheModule def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireB || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT) def canSupportCFlushLine = !usingVM || cfg.blockBytes * cfg.nSets <= (1 << pgIdxBits) require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$") } class HellaCacheBundle(implicit p: Parameters) extends CoreBundle()(p) { val cpu = Flipped(new HellaCacheIO) val ptw = new TLBPTWIO() val errors = new DCacheErrors val tlb_port = new DCacheTLBPort } class HellaCacheModule(outer: HellaCache) extends LazyModuleImp(outer) with HasL1HellaCacheParameters { implicit val edge: TLEdgeOut = outer.node.edges.out(0) val (tl_out, _) = outer.node.out(0) val io = IO(new HellaCacheBundle) val io_hartid = outer.hartIdSinkNodeOpt.map(_.bundle) val io_mmio_address_prefix = outer.mmioAddressPrefixSinkNodeOpt.map(_.bundle) dontTouch(io.cpu.resp) // Users like to monitor these fields even if the core ignores some signals dontTouch(io.cpu.s1_data) require(rowBits == edge.bundle.dataBits) private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile) fifoManagers.foreach { m => require (m.fifoId == fifoManagers.head.fifoId, s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees\n"+ s"${m.nodePath.map(_.name)}\nversus\n${fifoManagers.head.nodePath.map(_.name)}") } } /** Support overriding which HellaCache is instantiated */ case object BuildHellaCache extends Field[BaseTile => Parameters => HellaCache](HellaCacheFactory.apply) object HellaCacheFactory { def apply(tile: BaseTile)(p: Parameters): HellaCache = { if (tile.tileParams.dcache.get.nMSHRs == 0) new DCache(tile.tileId, tile.crossing)(p) else new NonBlockingDCache(tile.tileId)(p) } } /** Mix-ins for constructing tiles that have a HellaCache */ trait HasHellaCache { this: BaseTile => val module: HasHellaCacheModule implicit val p: Parameters var nDCachePorts = 0 lazy val dcache: HellaCache = LazyModule(p(BuildHellaCache)(this)(p)) tlMasterXbar.node := TLWidthWidget(tileParams.dcache.get.rowBits/8) := dcache.node dcache.hartIdSinkNodeOpt.map { _ := hartIdNexusNode } dcache.mmioAddressPrefixSinkNodeOpt.map { _ := mmioAddressPrefixNexusNode } InModuleBody { dcache.module.io.tlb_port := DontCare } } trait 
HasHellaCacheModule { val outer: HasHellaCache with HasTileParameters implicit val p: Parameters val dcachePorts = ListBuffer[HellaCacheIO]() val dcacheArb = Module(new HellaCacheArbiter(outer.nDCachePorts)(outer.p)) outer.dcache.module.io.cpu <> dcacheArb.io.mem } /** Metadata array used for all HellaCaches */ class L1Metadata(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val coh = new ClientMetadata val tag = UInt(tagBits.W) } object L1Metadata { def apply(tag: Bits, coh: ClientMetadata)(implicit p: Parameters) = { val meta = Wire(new L1Metadata) meta.tag := tag meta.coh := coh meta } } class L1MetaReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val idx = UInt(idxBits.W) val way_en = UInt(nWays.W) val tag = UInt(tagBits.W) } class L1MetaWriteReq(implicit p: Parameters) extends L1MetaReadReq()(p) { val data = new L1Metadata } class L1MetadataArray[T <: L1Metadata](onReset: () => T)(implicit p: Parameters) extends L1HellaCacheModule()(p) { val rstVal = onReset() val io = IO(new Bundle { val read = Flipped(Decoupled(new L1MetaReadReq)) val write = Flipped(Decoupled(new L1MetaWriteReq)) val resp = Output(Vec(nWays, rstVal.cloneType)) }) val rst_cnt = RegInit(0.U(log2Up(nSets+1).W)) val rst = rst_cnt < nSets.U val waddr = Mux(rst, rst_cnt, io.write.bits.idx) val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt val wmask = Mux(rst || (nWays == 1).B, (-1).S, io.write.bits.way_en.asSInt).asBools val rmask = Mux(rst || (nWays == 1).B, (-1).S, io.read.bits.way_en.asSInt).asBools when (rst) { rst_cnt := rst_cnt+1.U } val metabits = rstVal.getWidth val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W))) val wen = rst || io.write.valid when (wen) { tag_array.write(waddr, VecInit.fill(nWays)(wdata), wmask) } io.resp := tag_array.read(io.read.bits.idx, io.read.fire).map(_.asTypeOf(chiselTypeOf(rstVal))) io.read.ready := !wen // so really this could be a 6T RAM io.write.ready := !rst }
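The tag_array module that follows is what gets emitted for the masked SyncReadMem write in L1MetadataArray: one row per set, nWays lanes of packed metadata, and a per-way write mask. A stripped-down sketch of that memory idiom is shown here; WayMaskedMem is a hypothetical stand-alone module, not the HellaCache code itself.

import chisel3._
import chisel3.util._

// Way-masked synchronous-read tag memory, mirroring the SyncReadMem usage in
// L1MetadataArray: the write replicates one metadata word across all ways and
// uses the way-enable vector as the write mask.
class WayMaskedMem(nSets: Int, nWays: Int, metaBits: Int) extends Module {
  val io = IO(new Bundle {
    val addr  = Input(UInt(log2Ceil(nSets).W))
    val ren   = Input(Bool())
    val wen   = Input(Bool())
    val wayEn = Input(UInt(nWays.W))
    val wdata = Input(UInt(metaBits.W))
    val rdata = Output(Vec(nWays, UInt(metaBits.W)))
  })
  val mem = SyncReadMem(nSets, Vec(nWays, UInt(metaBits.W)))
  when (io.wen) {
    mem.write(io.addr, VecInit.fill(nWays)(io.wdata), io.wayEn.asBools)
  }
  io.rdata := mem.read(io.addr, io.ren && !io.wen)
}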
module tag_array( // @[HellaCache.scala:339:30] input [5:0] RW0_addr, input RW0_en, input RW0_clk, input RW0_wmode, input [175:0] RW0_wdata, output [175:0] RW0_rdata, input [7:0] RW0_wmask ); tag_array_ext tag_array_ext ( // @[HellaCache.scala:339:30] .RW0_addr (RW0_addr), .RW0_en (RW0_en), .RW0_clk (RW0_clk), .RW0_wmode (RW0_wmode), .RW0_wdata (RW0_wdata), .RW0_rdata (RW0_rdata), .RW0_wmask (RW0_wmask) ); // @[HellaCache.scala:339:30] endmodule
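The port widths of this particular instance follow from the cache geometry: the 6-bit RW0_addr addresses 64 sets, the 8-bit RW0_wmask selects among 8 ways, and 176 data bits split across 8 ways gives 22 bits of packed L1Metadata per way (consistent with a small coherence-state field plus the tag). A tiny Scala sketch of that arithmetic, with the values read off the ports above rather than taken from any configuration:

// Width bookkeeping for the tag_array instance above (values inferred from
// its ports, not from a configuration file).
object TagArrayGeometry extends App {
  val addrBits = 6                  // RW0_addr is [5:0]
  val nSets    = 1 << addrBits      // 64 sets
  val nWays    = 8                  // RW0_wmask is [7:0]
  val rowBits  = 176                // RW0_wdata / RW0_rdata are [175:0]
  val metaBits = rowBits / nWays    // 22 bits of packed L1Metadata per way
  println(s"sets=$nSets ways=$nWays metaBits=$metaBits")
}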
Generate the Verilog code corresponding to the following Chisel files. File MSHR.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import freechips.rocketchip.tilelink._ import TLPermissions._ import TLMessages._ import MetaData._ import chisel3.PrintableHelper import chisel3.experimental.dataview._ class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val a = Valid(new SourceARequest(params)) val b = Valid(new SourceBRequest(params)) val c = Valid(new SourceCRequest(params)) val d = Valid(new SourceDRequest(params)) val e = Valid(new SourceERequest(params)) val x = Valid(new SourceXRequest(params)) val dir = Valid(new DirectoryWrite(params)) val reload = Bool() // get next request via allocate (if any) } class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val way = UInt(params.wayBits.W) val blockB = Bool() val nestB = Bool() val blockC = Bool() val nestC = Bool() } class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val b_toN = Bool() // nested Probes may unhit us val b_toB = Bool() // nested Probes may demote us val b_clr_dirty = Bool() // nested Probes clear dirty val c_set_dirty = Bool() // nested Releases MAY set dirty } sealed trait CacheState { val code = CacheState.index.U CacheState.index = CacheState.index + 1 } object CacheState { var index = 0 } case object S_INVALID extends CacheState case object S_BRANCH extends CacheState case object S_BRANCH_C extends CacheState case object S_TIP extends CacheState case object S_TIP_C extends CacheState case object S_TIP_CD extends CacheState case object S_TIP_D extends CacheState case object S_TRUNK_C extends CacheState case object S_TRUNK_CD extends CacheState class MSHR(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup val status = Valid(new MSHRStatus(params)) val schedule = Decoupled(new ScheduleRequest(params)) val sinkc = Flipped(Valid(new SinkCResponse(params))) val sinkd = Flipped(Valid(new SinkDResponse(params))) val sinke = Flipped(Valid(new SinkEResponse(params))) val nestedwb = Flipped(new NestedWriteback(params)) }) val request_valid = RegInit(false.B) val request = Reg(new FullRequest(params)) val meta_valid = RegInit(false.B) val meta = Reg(new DirectoryResult(params)) // Define which states are valid when (meta_valid) { when (meta.state === INVALID) { assert (!meta.clients.orR) assert (!meta.dirty) } when (meta.state === BRANCH) { assert (!meta.dirty) } when 
(meta.state === TRUNK) { assert (meta.clients.orR) assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one } when (meta.state === TIP) { // noop } } // Completed transitions (s_ = scheduled), (w_ = waiting) val s_rprobe = RegInit(true.B) // B val w_rprobeackfirst = RegInit(true.B) val w_rprobeacklast = RegInit(true.B) val s_release = RegInit(true.B) // CW w_rprobeackfirst val w_releaseack = RegInit(true.B) val s_pprobe = RegInit(true.B) // B val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1] val s_flush = RegInit(true.B) // X w_releaseack val w_grantfirst = RegInit(true.B) val w_grantlast = RegInit(true.B) val w_grant = RegInit(true.B) // first | last depending on wormhole val w_pprobeackfirst = RegInit(true.B) val w_pprobeacklast = RegInit(true.B) val w_pprobeack = RegInit(true.B) // first | last depending on wormhole val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*) val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD val s_execute = RegInit(true.B) // D w_pprobeack, w_grant val w_grantack = RegInit(true.B) val s_writeback = RegInit(true.B) // W w_* // [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall) // However, inB and outC are higher priority than outB, so s_release and s_pprobe // may be safely issued while blockB. Thus we must NOT try to schedule the // potentially stuck s_acquire with either of them (scheduler is all or none). // Meta-data that we discover underway val sink = Reg(UInt(params.outer.bundle.sinkBits.W)) val gotT = Reg(Bool()) val bad_grant = Reg(Bool()) val probes_done = Reg(UInt(params.clientBits.W)) val probes_toN = Reg(UInt(params.clientBits.W)) val probes_noT = Reg(Bool()) // When a nested transaction completes, update our meta data when (meta_valid && meta.state =/= INVALID && io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) { when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B } when (io.nestedwb.c_set_dirty) { meta.dirty := true.B } when (io.nestedwb.b_toB) { meta.state := BRANCH } when (io.nestedwb.b_toN) { meta.hit := false.B } } // Scheduler status io.status.valid := request_valid io.status.bits.set := request.set io.status.bits.tag := request.tag io.status.bits.way := meta.way io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst) io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst // The above rules ensure we will block and not nest an outer probe while still doing our // own inner probes. Thus every probe wakes exactly one MSHR. io.status.bits.blockC := !meta_valid io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst) // The w_grantfirst in nestC is necessary to deal with: // acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock // ... 
this is possible because the release+probe can be for same set, but different tag // We can only demand: block, nest, or queue assert (!io.status.bits.nestB || !io.status.bits.blockB) assert (!io.status.bits.nestC || !io.status.bits.blockC) // Scheduler requests val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe io.schedule.bits.b.valid := !s_rprobe || !s_pprobe io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst) io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant io.schedule.bits.e.valid := !s_grantack && w_grantfirst io.schedule.bits.x.valid := !s_flush && w_releaseack io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait) io.schedule.bits.reload := no_wait io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid || io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid || io.schedule.bits.dir.valid // Schedule completions when (io.schedule.ready) { s_rprobe := true.B when (w_rprobeackfirst) { s_release := true.B } s_pprobe := true.B when (s_release && s_pprobe) { s_acquire := true.B } when (w_releaseack) { s_flush := true.B } when (w_pprobeackfirst) { s_probeack := true.B } when (w_grantfirst) { s_grantack := true.B } when (w_pprobeack && w_grant) { s_execute := true.B } when (no_wait) { s_writeback := true.B } // Await the next operation when (no_wait) { request_valid := false.B meta_valid := false.B } } // Resulting meta-data val final_meta_writeback = WireInit(meta) val req_clientBit = params.clientBit(request.source) val req_needT = needT(request.opcode, request.param) val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm val meta_no_clients = !meta.clients.orR val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT) when (request.prio(2) && (!params.firstLevel).B) { // always a hit final_meta_writeback.dirty := meta.dirty || request.opcode(0) final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state) final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U) final_meta_writeback.hit := true.B // chained requests are hits } .elsewhen (request.control && params.control.B) { // request.prio(0) when (meta.hit) { final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := meta.clients & ~probes_toN } final_meta_writeback.hit := false.B } .otherwise { final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2) final_meta_writeback.state := Mux(req_needT, Mux(req_acquire, TRUNK, TIP), Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH), MuxLookup(meta.state, 0.U(2.W))(Seq( INVALID -> BRANCH, BRANCH -> BRANCH, TRUNK -> TIP, TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP))))) final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) | Mux(req_acquire, req_clientBit, 0.U) final_meta_writeback.tag := request.tag final_meta_writeback.hit := true.B } when (bad_grant) { when (meta.hit) { // upgrade failed (B -> T) assert (!meta_valid || meta.state === BRANCH) final_meta_writeback.hit := true.B final_meta_writeback.dirty := false.B final_meta_writeback.state := BRANCH final_meta_writeback.clients := meta.clients & ~probes_toN } .otherwise { // failed N -> (T or B) 
final_meta_writeback.hit := false.B final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := 0.U } } val invalid = Wire(new DirectoryEntry(params)) invalid.dirty := false.B invalid.state := INVALID invalid.clients := 0.U invalid.tag := 0.U // Just because a client says BtoT, by the time we process the request he may be N. // Therefore, we must consult our own meta-data state to confirm he owns the line still. val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR // The client asking us to act is proof they don't have permissions. val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U) io.schedule.bits.a.bits.tag := request.tag io.schedule.bits.a.bits.set := request.set io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB) io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U || !(request.opcode === PutFullData || request.opcode === AcquirePerm) io.schedule.bits.a.bits.source := 0.U io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB))) io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag) io.schedule.bits.b.bits.set := request.set io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release) io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN) io.schedule.bits.c.bits.source := 0.U io.schedule.bits.c.bits.tag := meta.tag io.schedule.bits.c.bits.set := request.set io.schedule.bits.c.bits.way := meta.way io.schedule.bits.c.bits.dirty := meta.dirty io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param, MuxLookup(request.param, request.param)(Seq( NtoB -> Mux(req_promoteT, NtoT, NtoB), BtoT -> Mux(honour_BtoT, BtoT, NtoT), NtoT -> NtoT))) io.schedule.bits.d.bits.sink := 0.U io.schedule.bits.d.bits.way := meta.way io.schedule.bits.d.bits.bad := bad_grant io.schedule.bits.e.bits.sink := sink io.schedule.bits.x.bits.fail := false.B io.schedule.bits.dir.bits.set := request.set io.schedule.bits.dir.bits.way := meta.way io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback)) // Coverage of state transitions def cacheState(entry: DirectoryEntry, hit: Bool) = { val out = WireDefault(0.U) val c = entry.clients.orR val d = entry.dirty switch (entry.state) { is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) } is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) } is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) } is (INVALID) { out := S_INVALID.code } } when (!hit) { out := S_INVALID.code } out } val p = !params.lastLevel // can be probed val c = !params.firstLevel // can be acquired val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read) val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist val f = params.control // flush control register exists val cfg = (p, c, m, r, f) val b = r || p // can reach branch state (via probe downgrade or read-only device) // The cache must be used for something or we would not be here require(c || m) val evict = cacheState(meta, !meta.hit) val before = cacheState(meta, meta.hit) val after = cacheState(final_meta_writeback, 
true.B) def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}") } else { assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}") } if (cover && f) { params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}") } else { assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}") } } def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}") } else { assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}") } } when ((!s_release && w_rprobeackfirst) && io.schedule.ready) { eviction(S_BRANCH, b) // MMIO read to read-only device eviction(S_BRANCH_C, b && c) // you need children to become C eviction(S_TIP, true) // MMIO read || clean release can lead to this state eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_D, true) // MMIO write || dirty release lead here eviction(S_TRUNK_C, c) // acquire for write eviction(S_TRUNK_CD, c) // dirty release then reacquire } when ((!s_writeback && no_wait) && io.schedule.ready) { transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches transition(S_INVALID, S_TIP, m) // MMIO read transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_INVALID, S_TIP_D, m) // MMIO write transition(S_INVALID, S_TRUNK_C, c) // acquire transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions) transition(S_BRANCH, S_BRANCH_C, b && c) // acquire transition(S_BRANCH, S_TIP, b && m) // prefetch write transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_TIP_D, b && m) // MMIO write transition(S_BRANCH, S_TRUNK_C, b && c) // acquire transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH_C, S_INVALID, b && c && p) transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional) transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_TIP, S_INVALID, p) transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write transition(S_TIP, 
S_TIP_CD, false) // acquire does not make us dirty immediately transition(S_TIP, S_TRUNK_C, c) // acquire transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately transition(S_TIP_C, S_INVALID, c && p) transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional) transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_TIP_C, S_TRUNK_C, c) // acquire transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty transition(S_TIP_D, S_INVALID, p) transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired transition(S_TIP_D, S_TRUNK_CD, c) // acquire transition(S_TIP_CD, S_INVALID, c && p) transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional) transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire transition(S_TIP_CD, S_TRUNK_CD, c) // acquire transition(S_TRUNK_C, S_INVALID, c && p) transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional) transition(S_TRUNK_C, S_TIP_C, c) // bounce shared transition(S_TRUNK_C, S_TIP_D, c) // dirty release transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce transition(S_TRUNK_CD, S_INVALID, c && p) transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TRUNK_CD, S_TIP_D, c) // dirty release transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire } // Handle response messages val probe_bit = params.clientBit(io.sinkc.bits.source) val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client) val probe_toN = isToN(io.sinkc.bits.param) if (!params.firstLevel) when (io.sinkc.valid) { params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B") params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B") // Caution: the probe matches us only in set. // We would never allow an outer probe to nest until both w_[rp]probeack complete, so // it is safe to just unguardedly update the probe FSM. 
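    // probes_done accumulates the one-hot client bits of ProbeAcks seen on sink C;
    // last_probe is true on the ack that completes the set of clients actually probed
    // (meta.clients minus the excluded requester), so no separate response counter is needed.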
probes_done := probes_done | probe_bit probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U) probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT w_rprobeackfirst := w_rprobeackfirst || last_probe w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last) w_pprobeackfirst := w_pprobeackfirst || last_probe w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last) // Allow wormhole routing from sinkC if the first request beat has offset 0 val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U) w_pprobeack := w_pprobeack || set_pprobeack params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data") params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data") // However, meta-data updates need to be done more cautiously when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!! } when (io.sinkd.valid) { when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) { sink := io.sinkd.bits.sink w_grantfirst := true.B w_grantlast := io.sinkd.bits.last // Record if we need to prevent taking ownership bad_grant := io.sinkd.bits.denied // Allow wormhole routing for requests whose first beat has offset 0 w_grant := request.offset === 0.U || io.sinkd.bits.last params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data") params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data") gotT := io.sinkd.bits.param === toT } .elsewhen (io.sinkd.bits.opcode === ReleaseAck) { w_releaseack := true.B } } when (io.sinke.valid) { w_grantack := true.B } // Bootstrap new requests val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits) val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits) val new_request = Mux(io.allocate.valid, allocate_as_full, request) val new_needT = needT(new_request.opcode, new_request.param) val new_clientBit = params.clientBit(new_request.source) val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U) val prior = cacheState(final_meta_writeback, true.B) def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}") } else { assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}") } } when (io.allocate.valid && io.allocate.bits.repeat) { bypass(S_INVALID, f || p) // Can lose permissions (probe/flush) bypass(S_BRANCH, b) // MMIO read to read-only device bypass(S_BRANCH_C, b && c) // you need children to become C bypass(S_TIP, true) // MMIO read || clean release can lead to this state bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_D, true) // MMIO write || dirty release lead here bypass(S_TRUNK_C, c) // acquire for write bypass(S_TRUNK_CD, c) // dirty release then reacquire } when (io.allocate.valid) { assert (!request_valid || (no_wait && io.schedule.fire)) request_valid := true.B request := io.allocate.bits } // Create execution plan when (io.directory.valid || (io.allocate.valid && 
io.allocate.bits.repeat)) { meta_valid := true.B meta := new_meta probes_done := 0.U probes_toN := 0.U probes_noT := false.B gotT := false.B bad_grant := false.B // These should already be either true or turning true // We clear them here explicitly to simplify the mux tree s_rprobe := true.B w_rprobeackfirst := true.B w_rprobeacklast := true.B s_release := true.B w_releaseack := true.B s_pprobe := true.B s_acquire := true.B s_flush := true.B w_grantfirst := true.B w_grantlast := true.B w_grant := true.B w_pprobeackfirst := true.B w_pprobeacklast := true.B w_pprobeack := true.B s_probeack := true.B s_grantack := true.B s_execute := true.B w_grantack := true.B s_writeback := true.B // For C channel requests (ie: Release[Data]) when (new_request.prio(2) && (!params.firstLevel).B) { s_execute := false.B // Do we need to go dirty? when (new_request.opcode(0) && !new_meta.dirty) { s_writeback := false.B } // Does our state change? when (isToB(new_request.param) && new_meta.state === TRUNK) { s_writeback := false.B } // Do our clients change? when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) { s_writeback := false.B } assert (new_meta.hit) } // For X channel requests (ie: flush) .elsewhen (new_request.control && params.control.B) { // new_request.prio(0) s_flush := false.B // Do we need to actually do something? when (new_meta.hit) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } } // For A channel requests .otherwise { // new_request.prio(0) && !new_request.control s_execute := false.B // Do we need an eviction? when (!new_meta.hit && new_meta.state =/= INVALID) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } // Do we need an acquire? when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) { s_acquire := false.B w_grantfirst := false.B w_grantlast := false.B w_grant := false.B s_grantack := false.B s_writeback := false.B } // Do we need a probe? when ((!params.firstLevel).B && (new_meta.hit && (new_needT || new_meta.state === TRUNK) && (new_meta.clients & ~new_skipProbe) =/= 0.U)) { s_pprobe := false.B w_pprobeackfirst := false.B w_pprobeacklast := false.B w_pprobeack := false.B s_writeback := false.B } // Do we need a grantack? when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) { w_grantack := false.B s_writeback := false.B } // Becomes dirty? when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) { s_writeback := false.B } } } } File Parameters.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property.cover import scala.math.{min,max} case class CacheParameters( level: Int, ways: Int, sets: Int, blockBytes: Int, beatBytes: Int, // inner hintsSkipProbe: Boolean) { require (ways > 0) require (sets > 0) require (blockBytes > 0 && isPow2(blockBytes)) require (beatBytes > 0 && isPow2(beatBytes)) require (blockBytes >= beatBytes) val blocks = ways * sets val sizeBytes = blocks * blockBytes val blockBeats = blockBytes/beatBytes } case class InclusiveCachePortParameters( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams) { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e)) } object InclusiveCachePortParameters { val none = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.none) val full = InclusiveCachePortParameters( a = BufferParams.default, b = BufferParams.default, c = BufferParams.default, d = BufferParams.default, e = BufferParams.default) // This removes feed-through paths from C=>A and A=>C val fullC = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.default, d = BufferParams.none, e = BufferParams.none) val flowAD = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.flow, e = BufferParams.none) val flowAE = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.flow) // For innerBuf: // SinkA: no restrictions, flows into scheduler+putbuffer // SourceB: no restrictions, flows out of scheduler // sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore // SourceD: no restrictions, flows out of bankedStore/regout // SinkE: no restrictions, flows into scheduler // // ... so while none is possible, you probably want at least flowAC to cut ready // from the scheduler delay and flowD to ease SourceD back-pressure // For outerBufer: // SourceA: must not be pipe, flows out of scheduler // SinkB: no restrictions, flows into scheduler // SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored // SinkD: no restrictions, flows into scheduler & bankedStore // SourceE: must not be pipe, flows out of scheduler // // ... 
AE take the channel ready into the scheduler, so you need at least flowAE } case class InclusiveCacheMicroParameters( writeBytes: Int, // backing store update granularity memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz) portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes dirReg: Boolean = false, innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE { require (writeBytes > 0 && isPow2(writeBytes)) require (memCycles > 0) require (portFactor >= 2) // for inner RMW and concurrent outer Relase + Grant } case class InclusiveCacheControlParameters( address: BigInt, beatBytes: Int, bankedControl: Boolean) case class InclusiveCacheParameters( cache: CacheParameters, micro: InclusiveCacheMicroParameters, control: Boolean, inner: TLEdgeIn, outer: TLEdgeOut)(implicit val p: Parameters) { require (cache.ways > 1) require (cache.sets > 1 && isPow2(cache.sets)) require (micro.writeBytes <= inner.manager.beatBytes) require (micro.writeBytes <= outer.manager.beatBytes) require (inner.manager.beatBytes <= cache.blockBytes) require (outer.manager.beatBytes <= cache.blockBytes) // Require that all cached address ranges have contiguous blocks outer.manager.managers.flatMap(_.address).foreach { a => require (a.alignment >= cache.blockBytes) } // If we are the first level cache, we do not need to support inner-BCE val firstLevel = !inner.client.clients.exists(_.supports.probe) // If we are the last level cache, we do not need to support outer-B val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED) require (lastLevel) // Provision enough resources to achieve full throughput with missing single-beat accesses val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro) val secondary = max(mshrs, micro.memCycles - mshrs) val putLists = micro.memCycles // allow every request to be single beat val putBeats = max(2*cache.blockBeats, micro.memCycles) val relLists = 2 val relBeats = relLists*cache.blockBeats val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address)) val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_)) def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] = if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail) val addressMapping = bitOffsets(pickMask) val addressBits = addressMapping.size // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}") val allClients = inner.client.clients.size val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size val clientBits = max(1, clientBitsRaw) val stateBits = 2 val wayBits = log2Ceil(cache.ways) val setBits = log2Ceil(cache.sets) val offsetBits = log2Ceil(cache.blockBytes) val tagBits = addressBits - setBits - offsetBits val putBits = log2Ceil(max(putLists, relLists)) require (tagBits > 0) require (offsetBits > 0) val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1 val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1 val innerMaskBits = inner.manager.beatBytes / micro.writeBytes val outerMaskBits = outer.manager.beatBytes / micro.writeBytes def clientBit(source: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse) } } def 
clientSource(bit: UInt): UInt = {
    if (clientBitsRaw == 0) {
      0.U
    } else {
      Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
    }
  }

  def parseAddress(x: UInt): (UInt, UInt, UInt) = {
    val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
    val set = offset >> offsetBits
    val tag = set >> setBits
    (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
  }

  def widen(x: UInt, width: Int): UInt = {
    val y = x | 0.U(width.W)
    assert (y >> width === 0.U)
    y(width-1, 0)
  }

  def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
    val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
    val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
    addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
    Cat(bits.reverse)
  }

  def restoreAddress(expanded: UInt): UInt = {
    val missingBits = flatAddresses
      .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
      .groupBy(_._1)
      .view
      .mapValues(_.map(_._2))
    val muxMask = AddressDecoder(missingBits.values.toList)
    val mux = missingBits.toList.map { case (bits, addrs) =>
      val widen = addrs.map(_.widen(~muxMask))
      val matches = AddressSet
        .unify(widen.distinct)
        .map(_.contains(expanded))
        .reduce(_ || _)
      (matches, bits.U)
    }
    expanded | Mux1H(mux)
  }

  def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
    if (micro.dirReg) RegEnable(x, en) else x
  }

  def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
    cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}

object MetaData {
  val stateBits = 2
  def INVALID: UInt = 0.U(stateBits.W) // way is empty
  def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
  def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
  def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch

  // Does a request need trunk?
  def needT(opcode: UInt, param: UInt): Bool = {
    !opcode(2) ||
    (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
    ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
  }

  // Does a request prove the client need not be probed?
  def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
    // Acquire(toB) and Get => is N, so no probe
    // Acquire(*toT) => is N or B, but need T, so no probe
    // Hint => could be anything, so a probe IS needed; if hintsSkipProbe is enabled, skip probing the requesting client
    // Put* => is N or B, so probe IS needed
    opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm ||
    opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
  }

  def isToN(param: UInt): Bool = {
    param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
  }

  def isToB(param: UInt): Bool = {
    param === TLPermissions.TtoB || param === TLPermissions.BtoB
  }
}

object InclusiveCacheParameters {
  val lfsrBits = 10
  val L2ControlAddress = 0x2010000
  val L2ControlSize = 0x1000

  def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
    // We need 2-3 normal MSHRs to cover the Directory latency
    // To fully exploit memory bandwidth-delay-product, we need memCycles/blockBeats MSHRs
    max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
  }

  def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
    // We need a dedicated MSHR for B+C each
    2 + out_mshrs(cache, micro)
}

class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
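Before the generated module, a small self-contained Scala sketch of the bookkeeping convention the MSHR code above relies on (illustration only, not part of the sources; the names and the simplified conditions here are made up). Each s_* register means "this action still needs to be scheduled" and each w_* register means "this response is still awaited"; an action is offered to the scheduler once its prerequisites have been scheduled, and the slot may reload only when no wait remains, which is what no_wait and io.schedule.bits.reload express in MSHR.scala.

object MshrBookkeepingSketch extends App {
  // true = already done, false = still pending (the MSHR sets these flags to true as work completes)
  final case class Plan(
    s_release: Boolean, s_acquire: Boolean,
    w_releaseack: Boolean, w_grantlast: Boolean, w_grantack: Boolean, w_probeacklast: Boolean) {
    // An A-channel Acquire may be offered once the eviction (release) has been scheduled,
    // loosely mirroring `!s_acquire && s_release` in the schedule logic above (simplified).
    def offerAcquire: Boolean = !s_acquire && s_release
    // The MSHR may retire and reload only when every awaited response has arrived.
    def noWait: Boolean = w_releaseack && w_grantlast && w_grantack && w_probeacklast
  }

  val missWithEviction = Plan(
    s_release = false, s_acquire = false,
    w_releaseack = false, w_grantlast = false, w_grantack = false, w_probeacklast = true)
  println(missWithEviction.offerAcquire) // false: the release has not been scheduled yet
  println(missWithEviction.noWait)       // false: ReleaseAck, Grant and GrantAck still outstanding
}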
module MSHR_3( // @[MSHR.scala:84:7] input clock, // @[MSHR.scala:84:7] input reset, // @[MSHR.scala:84:7] input io_allocate_valid, // @[MSHR.scala:86:14] input io_allocate_bits_prio_0, // @[MSHR.scala:86:14] input io_allocate_bits_prio_1, // @[MSHR.scala:86:14] input io_allocate_bits_prio_2, // @[MSHR.scala:86:14] input io_allocate_bits_control, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14] input [8:0] io_allocate_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14] input [10:0] io_allocate_bits_set, // @[MSHR.scala:86:14] input io_allocate_bits_repeat, // @[MSHR.scala:86:14] input io_directory_valid, // @[MSHR.scala:86:14] input io_directory_bits_dirty, // @[MSHR.scala:86:14] input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14] input io_directory_bits_clients, // @[MSHR.scala:86:14] input [8:0] io_directory_bits_tag, // @[MSHR.scala:86:14] input io_directory_bits_hit, // @[MSHR.scala:86:14] input [3:0] io_directory_bits_way, // @[MSHR.scala:86:14] output io_status_valid, // @[MSHR.scala:86:14] output [10:0] io_status_bits_set, // @[MSHR.scala:86:14] output [8:0] io_status_bits_tag, // @[MSHR.scala:86:14] output [3:0] io_status_bits_way, // @[MSHR.scala:86:14] output io_status_bits_blockB, // @[MSHR.scala:86:14] output io_status_bits_nestB, // @[MSHR.scala:86:14] output io_status_bits_blockC, // @[MSHR.scala:86:14] output io_status_bits_nestC, // @[MSHR.scala:86:14] input io_schedule_ready, // @[MSHR.scala:86:14] output io_schedule_valid, // @[MSHR.scala:86:14] output io_schedule_bits_a_valid, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14] output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14] output io_schedule_bits_b_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14] output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14] output io_schedule_bits_c_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14] output [3:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14] output io_schedule_bits_d_valid, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_0, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14] output [5:0] 
io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14] output [3:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14] output io_schedule_bits_e_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14] output io_schedule_bits_x_valid, // @[MSHR.scala:86:14] output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14] output [3:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14] output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14] output io_schedule_bits_reload, // @[MSHR.scala:86:14] input io_sinkc_valid, // @[MSHR.scala:86:14] input io_sinkc_bits_last, // @[MSHR.scala:86:14] input [10:0] io_sinkc_bits_set, // @[MSHR.scala:86:14] input [8:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14] input io_sinkc_bits_data, // @[MSHR.scala:86:14] input io_sinkd_valid, // @[MSHR.scala:86:14] input io_sinkd_bits_last, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14] input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14] input io_sinkd_bits_denied, // @[MSHR.scala:86:14] input io_sinke_valid, // @[MSHR.scala:86:14] input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14] input [10:0] io_nestedwb_set, // @[MSHR.scala:86:14] input [8:0] io_nestedwb_tag, // @[MSHR.scala:86:14] input io_nestedwb_b_toN, // @[MSHR.scala:86:14] input io_nestedwb_b_toB, // @[MSHR.scala:86:14] input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14] input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14] ); wire [8:0] final_meta_writeback_tag; // @[MSHR.scala:215:38] wire final_meta_writeback_clients; // @[MSHR.scala:215:38] wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38] wire final_meta_writeback_dirty; // @[MSHR.scala:215:38] wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_0_0 = io_allocate_bits_prio_0; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7] wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7] wire [8:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7] wire [10:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7] wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7] wire 
io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7] wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7] wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7] wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7] wire [8:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7] wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7] wire [3:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7] wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7] wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7] wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7] wire [10:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7] wire [8:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7] wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7] wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7] wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7] wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7] wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7] wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7] wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7] wire [10:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7] wire [8:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7] wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7] wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7] wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7] wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7] wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68] wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80] wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21] wire invalid_clients = 1'h0; // @[MSHR.scala:268:21] wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137] wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11] wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137] wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11] wire [8:0] invalid_tag = 9'h0; // @[MSHR.scala:268:21] wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21] wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70] wire allocate_as_full_prio_0 = io_allocate_bits_prio_0_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_opcode = 
io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34] wire [8:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34] wire [10:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34] wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40] wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93] wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28] wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39] wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105] wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55] wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91] wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41] wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41] wire [8:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41] wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51] wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64] wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41] wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41] wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57] wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41] wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43] wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40] wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66] wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41] wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41] wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41] wire [8:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41] wire no_wait; // @[MSHR.scala:183:83] wire [10:0] io_status_bits_set_0; // @[MSHR.scala:84:7] wire [8:0] io_status_bits_tag_0; // @[MSHR.scala:84:7] wire [3:0] io_status_bits_way_0; // @[MSHR.scala:84:7] wire io_status_bits_blockB_0; // @[MSHR.scala:84:7] wire io_status_bits_nestB_0; // @[MSHR.scala:84:7] wire io_status_bits_blockC_0; // @[MSHR.scala:84:7] wire io_status_bits_nestC_0; // @[MSHR.scala:84:7] wire io_status_valid_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7] wire [3:0] 
io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_0_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7] wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7] wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7] wire io_schedule_valid_0; // @[MSHR.scala:84:7] reg request_valid; // @[MSHR.scala:97:30] assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30] reg request_prio_0; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_0_0 = request_prio_0; // @[MSHR.scala:84:7, :98:20] reg request_prio_1; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20] reg request_prio_2; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20] reg request_control; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_opcode; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_param; // @[MSHR.scala:98:20] reg [2:0] request_size; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_source; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20] reg [8:0] request_tag; // @[MSHR.scala:98:20] assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_offset; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_put; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20] reg [10:0] request_set; // @[MSHR.scala:98:20] 
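  // Note: the request_* registers above hold the fields latched from io_allocate_bits when this
  // MSHR is (re)allocated (MSHR.scala:98); the assigns around them forward those fields unchanged
  // to the status and schedule outputs.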
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] reg meta_valid; // @[MSHR.scala:99:27] reg meta_dirty; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17] reg [1:0] meta_state; // @[MSHR.scala:100:17] reg meta_clients; // @[MSHR.scala:100:17] wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39] wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27] wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27] reg [8:0] meta_tag; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17] reg meta_hit; // @[MSHR.scala:100:17] reg [3:0] meta_way; // @[MSHR.scala:100:17] assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] wire [3:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38] reg s_rprobe; // @[MSHR.scala:121:33] reg w_rprobeackfirst; // @[MSHR.scala:122:33] reg w_rprobeacklast; // @[MSHR.scala:123:33] reg s_release; // @[MSHR.scala:124:33] reg w_releaseack; // @[MSHR.scala:125:33] reg s_pprobe; // @[MSHR.scala:126:33] reg s_acquire; // @[MSHR.scala:127:33] reg s_flush; // @[MSHR.scala:128:33] reg w_grantfirst; // @[MSHR.scala:129:33] reg w_grantlast; // @[MSHR.scala:130:33] reg w_grant; // @[MSHR.scala:131:33] reg w_pprobeackfirst; // @[MSHR.scala:132:33] reg w_pprobeacklast; // @[MSHR.scala:133:33] reg w_pprobeack; // @[MSHR.scala:134:33] reg s_grantack; // @[MSHR.scala:136:33] reg s_execute; // @[MSHR.scala:137:33] reg w_grantack; // @[MSHR.scala:138:33] reg s_writeback; // @[MSHR.scala:139:33] reg [2:0] sink; // @[MSHR.scala:147:17] assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17] reg gotT; // @[MSHR.scala:148:17] reg bad_grant; // @[MSHR.scala:149:22] assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22] reg probes_done; // @[MSHR.scala:150:24] reg probes_toN; // @[MSHR.scala:151:23] reg probes_noT; // @[MSHR.scala:152:23] wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28] wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45] wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62] wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}] wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82] wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}] wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103] wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}] assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // 
@[MSHR.scala:168:{28,40,100}] assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40] wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39] wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}] wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}] wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96] assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}] assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93] assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28] assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28] wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43] wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64] wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}] wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85] wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}] assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}] assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39] wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33] wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}] wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}] assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}] assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83] wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31] wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}] assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}] assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55] wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31] wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44] assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}] assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41] wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32] wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}] assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}] assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64] wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31] wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}] assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // 
@[MSHR.scala:131:33, :187:{42,57}] assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57] wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31] assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}] assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43] wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31] assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}] assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40] wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34] wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}] wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70] wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}] assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}] assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66] wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49] wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}] wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}] wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49] wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}] assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}] assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105] wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71] wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71] wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71] wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire [8:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71] wire final_meta_writeback_hit; // @[MSHR.scala:215:38] wire req_clientBit = request_source == 6'h28; // @[Parameters.scala:46:9] wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12] wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12] wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _req_needT_T_2; // @[Parameters.scala:270:13] assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13] wire _excluded_client_T_6; // @[Parameters.scala:279:117] assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117] wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42] wire 
_req_needT_T_3; // @[Parameters.scala:270:42] assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42] wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11] assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11] wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42] wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _req_needT_T_6; // @[Parameters.scala:271:14] assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14] wire _req_acquire_T; // @[MSHR.scala:219:36] assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14] wire _excluded_client_T_1; // @[Parameters.scala:279:12] assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12] wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52] wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89] wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52] wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}] wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}] wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81] wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}] wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}] wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}] wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65] wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}] wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55] wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78] wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78] assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78] wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70] wire _evict_T_2; // @[MSHR.scala:317:26] assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _before_T_1; // @[MSHR.scala:317:26] assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}] wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 
2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}] wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43] assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43] wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}] wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75] wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}] wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}] wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}] wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54] wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}] wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45] wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}] wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}] wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40] wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40] assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40] wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65] assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65] wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41] wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}] wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72] wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}] wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70] wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70] wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53] assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53] wire _evict_T_1; // @[MSHR.scala:317:26] assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire _before_T; // @[MSHR.scala:317:26] assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70] wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70] wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55] wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? 
_final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70] wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70] wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66] wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}] wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}] wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40] assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30] wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54] wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}] assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21] assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21] assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36] assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? 
_final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36] wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9] wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}] wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}] wire _excluded_client_T = meta_hit & request_prio_0; // @[MSHR.scala:98:20, :100:17, :279:38] wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50] wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}] wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}] wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}] wire _excluded_client_T_9 = _excluded_client_T & _excluded_client_T_8; // @[Parameters.scala:279:106] wire excluded_client = _excluded_client_T_9 & req_clientBit; // @[Parameters.scala:46:9] wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56] wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70] assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}] wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51] wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55] wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52] wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}] wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}] assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38] assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91] wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42] wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70] wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}] assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}] assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41] wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42] assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? 
meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}] assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41] wire _io_schedule_bits_b_bits_clients_T = ~excluded_client; // @[MSHR.scala:279:28, :289:53] assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients & _io_schedule_bits_b_bits_clients_T; // @[MSHR.scala:100:17, :289:{51,53}] assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51] assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41] assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41] assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}] assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41] wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42] wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53] wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53] wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89] wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53] wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53] wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79] assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41] wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42] assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 
9'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}] assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41] wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32] wire [3:0] evict; // @[MSHR.scala:314:26] wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32] wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32] wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32] assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32] assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39] wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39] assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39] assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76] wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76] assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76] assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32] assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] before_0; // @[MSHR.scala:314:26] wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32] wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11] assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? 
{1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] after; // @[MSHR.scala:314:26] wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26] wire _after_T; // @[MSHR.scala:317:26] assign _after_T = _GEN_9; // @[MSHR.scala:317:26] wire _prior_T; // @[MSHR.scala:317:26] assign _prior_T = _GEN_9; // @[MSHR.scala:317:26] wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32] wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26] wire _after_T_1; // @[MSHR.scala:317:26] assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire _prior_T_1; // @[MSHR.scala:317:26] assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32] wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32] assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32] assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39] wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39] assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39] assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76] wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76] assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76] assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26] wire _after_T_3; // @[MSHR.scala:317:26] assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26] wire _prior_T_3; // @[MSHR.scala:317:26] assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26] assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? 
{1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire probe_bit = io_sinkc_bits_source_0 == 6'h28; // @[Parameters.scala:46:9] wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9] wire _last_probe_T; // @[MSHR.scala:459:33] assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33] wire _probes_done_T; // @[MSHR.scala:467:32] assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32] wire _last_probe_T_1 = ~excluded_client; // @[MSHR.scala:279:28, :289:53, :459:66] wire _last_probe_T_2 = meta_clients & _last_probe_T_1; // @[MSHR.scala:100:17, :459:{64,66}] wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}] wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11] wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43] wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}] wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75] wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}] wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9] wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}] wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53] wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}] wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42] wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55] wire _w_rprobeacklast_T; // @[MSHR.scala:471:55] assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55] wire _w_pprobeacklast_T; // @[MSHR.scala:473:55] assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55] wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}] wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42] wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}] wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77] wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}] wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}] wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32] wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33] wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}] wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35] wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40] wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [8:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_hit = _new_meta_T ? 
final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [3:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_request_prio_0 = io_allocate_valid_0 ? allocate_as_full_prio_0 : request_prio_0; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [8:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [10:0] new_request_set = io_allocate_valid_0 ? 
allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12] wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _new_needT_T_2; // @[Parameters.scala:270:13] assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13] wire _new_skipProbe_T_5; // @[Parameters.scala:279:117] assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117] wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42] wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _new_needT_T_6; // @[Parameters.scala:271:14] assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14] wire _new_skipProbe_T; // @[Parameters.scala:279:12] assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12] wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52] wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89] wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire new_clientBit = new_request_source == 6'h28; // @[Parameters.scala:46:9] wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50] wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}] wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}] wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}] wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9] wire [3:0] prior; // @[MSHR.scala:314:26] wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32] wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
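The cone of logic above ends in the evict/before/after/prior wires, each of which folds the directory metadata (hit, the 2-bit state, dirty, and client presence) into a 4-bit code. As a hedged reconstruction of the selection tree visible in the before_0 assignment above (the object name, argument names, and the interpretation of the state values are assumptions, not taken from the original MSHR.scala), the encoding could be sketched in Chisel roughly as:

import chisel3._
import chisel3.util._

// Hedged reconstruction of the 4-bit metadata encoding seen in the before_0 cone above.
// Intended to be called inside a Module during elaboration; names and state meanings are guesses.
object MetaCode {
  def apply(hit: Bool, state: UInt, dirty: Bool, client: Bool): UInt = {
    val code = WireDefault(Cat(state === 0.U, 0.U(3.W)))           // fallback: 8 if state is 0, else 0
    when(state === 1.U) { code := Cat(0.U(3.W), !client) }          // lowest valid state: 0 or 1
    when(state === 2.U) { code := Cat(0.U(2.W), 1.U(1.W), !dirty) } // middle state: 2 or 3
    when(state === 3.U) {                                           // highest state: 4..7, split on client presence
      code := Cat(0.U(1.W), Mux(client, Cat(2.U(2.W), !dirty), Cat(3.U(2.W), !dirty)))
    }
    when(!hit) { code := "b1000".U(4.W) }                           // a miss overrides everything, as in the mux tree above
    code
  }
}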
Generate the Verilog code corresponding to the following Chisel files. File Crossing.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg} @deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2") class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val intnode = IntAdapterNode() lazy val module = new Impl class Impl extends LazyModuleImp(this) { (intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) => out := SynchronizerShiftReg(in, sync) } } } object IntSyncCrossingSource { def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) = { val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered)) intsource.node } } class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule { val node = IntSyncSourceNode(alreadyRegistered) lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl) class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := AsyncResetReg(Cat(in.reverse)).asBools } } class ImplRegistered extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := in } } } object IntSyncCrossingSink { @deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. 
Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2") def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(sync) lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := SynchronizerShiftReg(in.sync, sync) } } } object IntSyncAsyncCrossingSink { def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(0) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := in.sync } } } object IntSyncSyncCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncSyncCrossingSink()) intsink.node } } class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(1) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := RegNext(in.sync) } } } object IntSyncRationalCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncRationalCrossingSink()) intsink.node } } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. 
*/ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. 
* * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. 
*/ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. 
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File AsyncResetReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ /** This black-boxes an Async Reset * (or Set) * Register. * * Because Chisel doesn't support * parameterized black boxes, * we unfortunately have to * instantiate a number of these. * * We also have to hard-code the set/ * reset behavior. * * Do not confuse an asynchronous * reset signal with an asynchronously * reset reg. You should still * properly synchronize your reset * deassertion. * * @param d Data input * @param q Data Output * @param clk Clock Input * @param rst Reset Input * @param en Write Enable Input * */ class AsyncResetReg(resetValue: Int = 0) extends RawModule { val io = IO(new Bundle { val d = Input(Bool()) val q = Output(Bool()) val en = Input(Bool()) val clk = Input(Clock()) val rst = Input(Reset()) }) val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W))) when (io.en) { reg := io.d } io.q := reg } class SimpleRegIO(val w: Int) extends Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) } class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module { override def desiredName = s"AsyncResetRegVec_w${w}_i${init}" val io = IO(new SimpleRegIO(w)) val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W))) when (io.en) { reg := io.d } io.q := reg } object AsyncResetReg { // Create Single Registers def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = { val reg = Module(new AsyncResetReg(if (init) 1 else 0)) reg.io.d := d reg.io.clk := clk reg.io.rst := rst reg.io.en := true.B name.foreach(reg.suggestName(_)) reg.io.q } def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None) def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name)) // Create Vectors of Registers def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = { val w = updateData.getWidth max resetData.bitLength val reg = Module(new AsyncResetRegVec(w, resetData)) name.foreach(reg.suggestName(_)) reg.io.d := updateData reg.io.en := enable reg.io.q } def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData, resetData, enable, Some(name)) def 
apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B) def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name)) def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData=BigInt(0), enable) def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name)) def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B) def apply(updateData: UInt, name:String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name)) }
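A minimal usage sketch of the AsyncResetReg factory methods above (illustrative only, not part of the files being elaborated; the module and signal names are hypothetical). It exercises the vector form with an explicit reset value and write enable, and the single-bit form with an explicit clock/reset pair.

import chisel3._
import freechips.rocketchip.util.AsyncResetReg

class AsyncRegExample extends Module {
  val io = IO(new Bundle {
    val d    = Input(UInt(4.W))
    val en   = Input(Bool())
    val q    = Output(UInt(4.W))
    val dBit = Input(Bool())
    val qBit = Output(Bool())
  })
  // Vector form: elaborates an AsyncResetRegVec_w4_i0, write-enabled by io.en.
  io.q := AsyncResetReg(io.d, BigInt(0), io.en, "example_reg")
  // Single-bit form: AsyncResetReg register with explicit clock and reset inputs.
  io.qBit := AsyncResetReg(io.dBit, clock, reset.asBool)
}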
module IntSyncCrossingSource_n1x1_7( // @[Crossing.scala:41:9] input clock, // @[Crossing.scala:41:9] input reset, // @[Crossing.scala:41:9] input auto_in_0, // @[LazyModuleImp.scala:107:25] output auto_out_sync_0 // @[LazyModuleImp.scala:107:25] ); wire auto_in_0_0 = auto_in_0; // @[Crossing.scala:41:9] wire nodeIn_0 = auto_in_0_0; // @[Crossing.scala:41:9] wire nodeOut_sync_0; // @[MixedNode.scala:542:17] wire auto_out_sync_0_0; // @[Crossing.scala:41:9] assign auto_out_sync_0_0 = nodeOut_sync_0; // @[Crossing.scala:41:9] AsyncResetRegVec_w1_i0_7 reg_0 ( // @[AsyncResetReg.scala:86:21] .clock (clock), .reset (reset), .io_d (nodeIn_0), // @[MixedNode.scala:551:17] .io_q (nodeOut_sync_0) ); // @[AsyncResetReg.scala:86:21] assign auto_out_sync_0 = auto_out_sync_0_0; // @[Crossing.scala:41:9] endmodule
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: package constellation.channel import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy._ import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.util._ import constellation.noc.{HasNoCParams} class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams { val io = IO(new Bundle { val in = Input(new Channel(cParam)) }) val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B })) for (i <- 0 until cParam.srcSpeedup) { val flit = io.in.flit(i) when (flit.valid) { when (flit.bits.head) { in_flight(flit.bits.virt_channel_id) := true.B assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken") } when (flit.bits.tail) { in_flight(flit.bits.virt_channel_id) := false.B } } val possibleFlows = cParam.possibleFlows when (flit.valid && flit.bits.head) { cParam match { case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) => assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } } } }
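Before the generated monitor below, a purely illustrative restatement of the in-flight tracking idea from Monitor.scala, decoupled from the constellation Channel types (all names here are hypothetical): a head flit marks its virtual channel busy, a tail flit clears it, and a second head on a busy channel trips the same assertion.

import chisel3._
import chisel3.util._

class HeadTailTracker(nVC: Int) extends Module {
  val io = IO(new Bundle {
    val valid = Input(Bool())
    val head  = Input(Bool())
    val tail  = Input(Bool())
    val vc    = Input(UInt(log2Ceil(nVC).W))
  })
  val inFlight = RegInit(VecInit(Seq.fill(nVC)(false.B)))
  when (io.valid) {
    when (io.head) {
      assert(!inFlight(io.vc), "Flit head/tail sequencing is broken")
      inFlight(io.vc) := true.B
    }
    when (io.tail) { inFlight(io.vc) := false.B }
  }
}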
module NoCMonitor_98( // @[Monitor.scala:11:7] input clock, // @[Monitor.scala:11:7] input reset, // @[Monitor.scala:11:7] input io_in_flit_0_valid, // @[Monitor.scala:12:14] input io_in_flit_0_bits_head, // @[Monitor.scala:12:14] input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14] input [4:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14] input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14] input [4:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14] input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14] input [2:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14] ); reg in_flight_0; // @[Monitor.scala:16:26] reg in_flight_1; // @[Monitor.scala:16:26] reg in_flight_2; // @[Monitor.scala:16:26] reg in_flight_3; // @[Monitor.scala:16:26] reg in_flight_4; // @[Monitor.scala:16:26] wire _GEN = io_in_flit_0_bits_virt_channel_id == 3'h0; // @[Monitor.scala:21:46] wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 3'h2; // @[Monitor.scala:21:46] wire _GEN_1 = io_in_flit_0_bits_virt_channel_id == 3'h3; // @[Monitor.scala:21:46] wire _GEN_2 = io_in_flit_0_bits_virt_channel_id == 3'h4; // @[Monitor.scala:21:46]
Generate the Verilog code corresponding to the following Chisel files. File ShiftRegisterPriorityQueue.scala: package compressacc import chisel3._ import chisel3.util._ import chisel3.util._ // TODO : support enq & deq at the same cycle class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle { val output_prev = KeyValue(keyWidth, value) val output_nxt = KeyValue(keyWidth, value) val input_prev = Flipped(KeyValue(keyWidth, value)) val input_nxt = Flipped(KeyValue(keyWidth, value)) val cmd = Flipped(Valid(UInt(1.W))) val insert_here = Input(Bool()) val cur_input_keyval = Flipped(KeyValue(keyWidth, value)) val cur_output_keyval = KeyValue(keyWidth, value) } class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module { val io = IO(new PriorityQueueStageIO(keyWidth, value)) dontTouch(io) val CMD_DEQ = 0.U val CMD_ENQ = 1.U val MAX_VALUE = (1 << keyWidth) - 1 val key_reg = RegInit(MAX_VALUE.U(keyWidth.W)) val value_reg = Reg(value) io.output_prev.key := key_reg io.output_prev.value := value_reg io.output_nxt.key := key_reg io.output_nxt.value := value_reg io.cur_output_keyval.key := key_reg io.cur_output_keyval.value := value_reg when (io.cmd.valid) { switch (io.cmd.bits) { is (CMD_DEQ) { key_reg := io.input_nxt.key value_reg := io.input_nxt.value } is (CMD_ENQ) { when (io.insert_here) { key_reg := io.cur_input_keyval.key value_reg := io.cur_input_keyval.value } .elsewhen (key_reg >= io.cur_input_keyval.key) { key_reg := io.input_prev.key value_reg := io.input_prev.value } .otherwise { // do nothing } } } } } object PriorityQueueStage { def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v) } // TODO // - This design is not scalable as the enqued_keyval is broadcasted to all the stages // - Add pipeline registers later class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle { val cnt_bits = log2Ceil(queSize+1) val counter = Output(UInt(cnt_bits.W)) val enq = Flipped(Decoupled(KeyValue(keyWidth, value))) val deq = Decoupled(KeyValue(keyWidth, value)) } class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module { val keyWidthInternal = keyWidth + 1 val CMD_DEQ = 0.U val CMD_ENQ = 1.U val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value)) dontTouch(io) val MAX_VALUE = ((1 << keyWidthInternal) - 1).U val cnt_bits = log2Ceil(queSize+1) // do not consider cases where we are inserting more entries then the queSize val counter = RegInit(0.U(cnt_bits.W)) io.counter := counter val full = (counter === queSize.U) val empty = (counter === 0.U) io.deq.valid := !empty io.enq.ready := !full when (io.enq.fire) { counter := counter + 1.U } when (io.deq.fire) { counter := counter - 1.U } val cmd_valid = io.enq.valid || io.deq.ready val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ) assert(!(io.enq.valid && io.deq.ready)) val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value))) for (i <- 0 until (queSize - 1)) { stages(i+1).io.input_prev <> stages(i).io.output_nxt stages(i).io.input_nxt <> stages(i+1).io.output_prev } stages(queSize-1).io.input_nxt.key := MAX_VALUE // stages(queSize-1).io.input_nxt.value := stages(queSize-1).io.input_nxt.value.symbol := 0.U // stages(queSize-1).io.input_nxt.value.child(0) := 0.U // stages(queSize-1).io.input_nxt.value.child(1) := 0.U stages(0).io.input_prev.key := io.enq.bits.key stages(0).io.input_prev.value <> io.enq.bits.value for (i <- 0 until queSize) { stages(i).io.cmd.valid := cmd_valid stages(i).io.cmd.bits := cmd 
stages(i).io.cur_input_keyval <> io.enq.bits } val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B))) for (i <- 0 until queSize) { is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key) } val is_large_or_equal_cat = Wire(UInt(queSize.W)) is_large_or_equal_cat := Cat(is_large_or_equal.reverse) val insert_here_idx = PriorityEncoder(is_large_or_equal_cat) for (i <- 0 until queSize) { when (i.U === insert_here_idx) { stages(i).io.insert_here := true.B } .otherwise { stages(i).io.insert_here := false.B } } io.deq.bits <> stages(0).io.output_prev }
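The enqueue path above picks the stage to insert at with a PriorityEncoder over per-stage key comparisons. A standalone sketch of just that selection logic (hypothetical module, outside the queue), showing why the lowest-indexed stage whose stored key is >= the incoming key wins:

import chisel3._
import chisel3.util._

class InsertSelect(queSize: Int, keyWidth: Int) extends Module {
  val io = IO(new Bundle {
    val storedKeys = Input(Vec(queSize, UInt(keyWidth.W)))
    val enqKey     = Input(UInt(keyWidth.W))
    val insertHere = Output(Vec(queSize, Bool()))
  })
  // Per-stage comparison, packed LSB-first so PriorityEncoder favors stage 0.
  val geq = io.storedKeys.map(_ >= io.enqKey)
  val idx = PriorityEncoder(Cat(geq.reverse))
  io.insertHere := VecInit(Seq.tabulate(queSize)(i => idx === i.U))
}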
module PriorityQueueStage_156( // @[ShiftRegisterPriorityQueue.scala:21:7] input clock, // @[ShiftRegisterPriorityQueue.scala:21:7] input reset, // @[ShiftRegisterPriorityQueue.scala:21:7] output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14] ); wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24] assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22] assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] wire _T_2 = 
key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30]
  always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
    if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7]
      key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24]
    else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
      if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
        if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
          key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
        else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
          key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
      end
      else // @[ShiftRegisterPriorityQueue.scala:21:7]
        key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
    end
    if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
      if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
        if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
          value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
        else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
          value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
      end
      else // @[ShiftRegisterPriorityQueue.scala:21:7]
        value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
    end
  end // always @(posedge)
  assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
  assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
  assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
  assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
  assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
  assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File Crossing.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg} @deprecated("IntXing does not ensure interrupt source is glitch free. 
Use IntSyncSource and IntSyncSink", "rocket-chip 1.2") class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val intnode = IntAdapterNode() lazy val module = new Impl class Impl extends LazyModuleImp(this) { (intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) => out := SynchronizerShiftReg(in, sync) } } } object IntSyncCrossingSource { def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) = { val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered)) intsource.node } } class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule { val node = IntSyncSourceNode(alreadyRegistered) lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl) class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := AsyncResetReg(Cat(in.reverse)).asBools } } class ImplRegistered extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := in } } } object IntSyncCrossingSink { @deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2") def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(sync) lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := SynchronizerShiftReg(in.sync, sync) } } } object IntSyncAsyncCrossingSink { def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(0) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := in.sync } } } object IntSyncSyncCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncSyncCrossingSink()) intsink.node } } class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(1) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := RegNext(in.sync) } } } object IntSyncRationalCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncRationalCrossingSink()) intsink.node } } File 
LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. 
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. 
*/ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. 
It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with 
InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. 
* * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
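As the in/out documentation above notes, node bundles may only be consumed once the parent LazyModule.module is elaborating. A hedged sketch of that pattern, closely mirroring the IntXing adapter quoted earlier but without the synchronizer (the module itself is hypothetical):

import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.interrupts._

class IntPassthrough(implicit p: Parameters) extends LazyModule {
  val node = IntAdapterNode()
  lazy val module = new LazyModuleImp(this) {
    // Legal here: instantiate() has run for the node, so its bundles and edges exist.
    (node.in zip node.out).foreach { case ((in, _), (out, _)) => out := in }
  }
}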
module IntSyncAsyncCrossingSink_n1x1_3( // @[Crossing.scala:74:9] input clock, // @[Crossing.scala:74:9] input reset, // @[Crossing.scala:74:9] input auto_in_sync_0, // @[LazyModuleImp.scala:107:25] output auto_out_0 // @[LazyModuleImp.scala:107:25] ); wire auto_in_sync_0_0 = auto_in_sync_0; // @[Crossing.scala:74:9] wire nodeIn_sync_0 = auto_in_sync_0_0; // @[Crossing.scala:74:9] wire nodeOut_0; // @[MixedNode.scala:542:17] wire auto_out_0_0; // @[Crossing.scala:74:9] assign auto_out_0_0 = nodeOut_0; // @[Crossing.scala:74:9] SynchronizerShiftReg_w1_d3_3 chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (nodeIn_sync_0), // @[MixedNode.scala:551:17] .io_q (nodeOut_0) ); // @[ShiftReg.scala:45:23] assign auto_out_0 = auto_out_0_0; // @[Crossing.scala:74:9] endmodule
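For completeness, a hedged sketch of how the source/sink pair from Crossing.scala is intended to be composed; the wrapper, its endpoint nodes, and the clock-domain placement of the two halves are hypothetical or elided.

import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.interrupts._

class IntCrossingExample(implicit p: Parameters) extends LazyModule {
  val src = IntSourceNode(IntSourcePortSimple())
  val dst = IntSinkNode(IntSinkPortSimple())
  val xSource = LazyModule(new IntSyncCrossingSource(alreadyRegistered = false))
  val xSink   = LazyModule(new IntSyncAsyncCrossingSink(sync = 3))
  xSource.node := src        // register the interrupt level in the source domain
  xSink.node := xSource.node // carry it across on the IntSync edge
  dst := xSink.node          // re-synchronize into the sink domain
  lazy val module = new LazyModuleImp(this) { }
}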
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag }
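An illustrative aside (not part of package.scala above, and not an input to the Verilog generation that follows): a minimal sketch of a few of these helpers in use. The module and port names here are made up for illustration.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util._

class UtilSketch extends Module {
  val io = IO(new Bundle {
    val cmd  = Input(UInt(3.W))
    val data = Input(UInt(8.W))
    val en   = Input(Bool())
    val hit  = Output(Bool())
    val held = Output(UInt(8.W))
    val fill = Output(UInt(8.W))
    val rot  = Output(UInt(8.W))
  })
  // UIntIsOneOf: membership test against a small set of constants
  io.hit  := io.cmd.isOneOf(1.U, 3.U, 5.U)
  // DataToAugmentedData.holdUnless: pass data through while en is high, otherwise
  // hold the last value captured while en was high
  io.held := io.data.holdUnless(io.en)
  // leftOR: fill 1s from the lowest set bit upward, e.g. 0b00010100 -> 0b11111100
  io.fill := leftOR(io.data)
  // UIntToAugmentedUInt.rotateRight: static rotation by 3 bit positions
  io.rot  := io.data.rotateRight(3)
}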
module OptimizationBarrier_EntryData_55( // @[package.scala:267:30] input clock, // @[package.scala:267:30] input reset, // @[package.scala:267:30] input [19:0] io_x_ppn, // @[package.scala:268:18] input io_x_u, // @[package.scala:268:18] input io_x_g, // @[package.scala:268:18] input io_x_ae, // @[package.scala:268:18] input io_x_sw, // @[package.scala:268:18] input io_x_sx, // @[package.scala:268:18] input io_x_sr, // @[package.scala:268:18] input io_x_pw, // @[package.scala:268:18] input io_x_px, // @[package.scala:268:18] input io_x_pr, // @[package.scala:268:18] input io_x_pal, // @[package.scala:268:18] input io_x_paa, // @[package.scala:268:18] input io_x_eff, // @[package.scala:268:18] input io_x_c, // @[package.scala:268:18] input io_x_fragmented_superpage, // @[package.scala:268:18] output [19:0] io_y_ppn, // @[package.scala:268:18] output io_y_u, // @[package.scala:268:18] output io_y_g, // @[package.scala:268:18] output io_y_ae, // @[package.scala:268:18] output io_y_sw, // @[package.scala:268:18] output io_y_sx, // @[package.scala:268:18] output io_y_sr, // @[package.scala:268:18] output io_y_pw, // @[package.scala:268:18] output io_y_px, // @[package.scala:268:18] output io_y_pr, // @[package.scala:268:18] output io_y_pal, // @[package.scala:268:18] output io_y_paa, // @[package.scala:268:18] output io_y_eff, // @[package.scala:268:18] output io_y_c, // @[package.scala:268:18] output io_y_fragmented_superpage // @[package.scala:268:18] ); wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30] wire io_x_u_0 = io_x_u; // @[package.scala:267:30] wire io_x_g_0 = io_x_g; // @[package.scala:267:30] wire io_x_ae_0 = io_x_ae; // @[package.scala:267:30] wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30] wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30] wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30] wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30] wire io_x_px_0 = io_x_px; // @[package.scala:267:30] wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30] wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30] wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30] wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30] wire io_x_c_0 = io_x_c; // @[package.scala:267:30] wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30] wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30] wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30] wire io_y_g_0 = io_x_g_0; // @[package.scala:267:30] wire io_y_ae_0 = io_x_ae_0; // @[package.scala:267:30] wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30] wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30] wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30] wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30] wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30] wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30] wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30] wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30] wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30] wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30] wire io_y_fragmented_superpage_0 = io_x_fragmented_superpage_0; // @[package.scala:267:30] assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30] assign io_y_u = io_y_u_0; // @[package.scala:267:30] assign io_y_g = io_y_g_0; // @[package.scala:267:30] assign io_y_ae = io_y_ae_0; // @[package.scala:267:30] assign io_y_sw = io_y_sw_0; // @[package.scala:267:30] assign io_y_sx = io_y_sx_0; // @[package.scala:267:30] assign io_y_sr = io_y_sr_0; // 
@[package.scala:267:30] assign io_y_pw = io_y_pw_0; // @[package.scala:267:30] assign io_y_px = io_y_px_0; // @[package.scala:267:30] assign io_y_pr = io_y_pr_0; // @[package.scala:267:30] assign io_y_pal = io_y_pal_0; // @[package.scala:267:30] assign io_y_paa = io_y_paa_0; // @[package.scala:267:30] assign io_y_eff = io_y_eff_0; // @[package.scala:267:30] assign io_y_c = io_y_c_0; // @[package.scala:267:30] assign io_y_fragmented_superpage = io_y_fragmented_superpage_0; // @[package.scala:267:30] endmodule
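For context, a minimal sketch of how a pass-through module like OptimizationBarrier_EntryData_55 above arises. The EntryData bundle below is a simplified stand-in whose fields are read off the Verilog ports; the real definition lives elsewhere in rocket-chip (the TLB code). Only the OptimizationBarrier helper itself comes from package.scala.

import chisel3._
import freechips.rocketchip.util.OptimizationBarrier

// Simplified stand-in bundle; field names and widths taken from the Verilog ports above.
class EntryData extends Bundle {
  val ppn = UInt(20.W)
  val u, g, ae, sw, sx, sr, pw, px, pr, pal, paa, eff, c = Bool()
  val fragmented_superpage = Bool()
}

class BarrierUser extends Module {
  val io = IO(new Bundle {
    val in  = Input(new EntryData)
    val out = Output(new EntryData)
  })
  // OptimizationBarrier wraps its argument in a module that simply drives io.y from
  // io.x, giving synthesis a boundary it will not optimize across. Its desiredName is
  // "OptimizationBarrier_" plus the bundle's typeName; the numeric suffix (_55) simply
  // keeps the module name unique when a larger design elaborates many such barriers.
  io.out := OptimizationBarrier(io.in)
}

Connecting io.out this way elaborates exactly the kind of combinational pass-through seen in the Verilog above: every io_y_* output is assigned directly from the corresponding io_x_* input.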
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
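      // Descriptive note (added): this dynamic rotate-right is a log-depth barrel rotator.
      // Stage i applies a fixed rotateRight(1 << i) only when bit i of the (zero-extended)
      // rotate amount is set, so mux depth grows with log2Ceil(width) rather than with the
      // rotate amount itself.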
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.diplomacy.{ AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry, IdRange, RegionType, TransferSizes } import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions} import freechips.rocketchip.util.{ AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase, CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct } import scala.math.max //These transfer sizes describe requests issued from masters on the A channel that will be responded by slaves on the D channel case class TLMasterToSlaveTransferSizes( // Supports both Acquire+Release of the following two sizes: acquireT: TransferSizes = TransferSizes.none, acquireB: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none) extends TLCommonTransferSizes { def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .intersect(rhs.acquireT), acquireB = acquireB .intersect(rhs.acquireB), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint)) def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .mincover(rhs.acquireT), acquireB = acquireB .mincover(rhs.acquireB), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint)) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(acquireT, "T"), str(acquireB, "B"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""acquireT = ${acquireT} |acquireB = ${acquireB} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLMasterToSlaveTransferSizes { def unknownEmits = TLMasterToSlaveTransferSizes( acquireT = TransferSizes(1, 4096), acquireB = TransferSizes(1, 4096), arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = 
TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096)) def unknownSupports = TLMasterToSlaveTransferSizes() } //These transfer sizes describe requests issued from slaves on the B channel that will be responded by masters on the C channel case class TLSlaveToMasterTransferSizes( probe: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none ) extends TLCommonTransferSizes { def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .intersect(rhs.probe), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint) ) def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .mincover(rhs.probe), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint) ) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(probe, "P"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""probe = ${probe} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLSlaveToMasterTransferSizes { def unknownEmits = TLSlaveToMasterTransferSizes( arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096), probe = TransferSizes(1, 4096)) def unknownSupports = TLSlaveToMasterTransferSizes() } trait TLCommonTransferSizes { def arithmetic: TransferSizes def logical: TransferSizes def get: TransferSizes def putFull: TransferSizes def putPartial: TransferSizes def hint: TransferSizes } class TLSlaveParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], setName: Option[String], val address: Seq[AddressSet], val regionType: RegionType.T, val executable: Boolean, val fifoId: Option[Int], val supports: TLMasterToSlaveTransferSizes, val emits: TLSlaveToMasterTransferSizes, // By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation) val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData // If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order // Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck // ReleaseAck may NEVER be denied extends SimpleProduct { def sortedAddress = address.sorted override def canEqual(that: Any): Boolean = 
that.isInstanceOf[TLSlaveParameters] override def productPrefix = "TLSlaveParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 11 def productElement(n: Int): Any = n match { case 0 => name case 1 => address case 2 => resources case 3 => regionType case 4 => executable case 5 => fifoId case 6 => supports case 7 => emits case 8 => alwaysGrantsT case 9 => mayDenyGet case 10 => mayDenyPut case _ => throw new IndexOutOfBoundsException(n.toString) } def supportsAcquireT: TransferSizes = supports.acquireT def supportsAcquireB: TransferSizes = supports.acquireB def supportsArithmetic: TransferSizes = supports.arithmetic def supportsLogical: TransferSizes = supports.logical def supportsGet: TransferSizes = supports.get def supportsPutFull: TransferSizes = supports.putFull def supportsPutPartial: TransferSizes = supports.putPartial def supportsHint: TransferSizes = supports.hint require (!address.isEmpty, "Address cannot be empty") address.foreach { a => require (a.finite, "Address must be finite") } address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)") require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)") require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)") require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)") require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)") require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)") require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT") // Make sure that the regionType agrees with the capabilities require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected") val maxTransfer = List( // Largest supported transfer of all types supportsAcquireT.max, supportsAcquireB.max, supportsArithmetic.max, supportsLogical.max, supportsGet.max, supportsPutFull.max, supportsPutPartial.max).max val maxAddress = address.map(_.max).max val minAlignment = address.map(_.alignment).min // The device had better not support a transfer larger than its alignment require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)") def toResource: ResourceAddress = { ResourceAddress(address, ResourcePermissions( r = supportsAcquireB || supportsGet, w = supportsAcquireT || supportsPutFull, x = executable, c = supportsAcquireB, a = supportsArithmetic && supportsLogical)) } def findTreeViolation() = nodePath.find { case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false case _: SinkNode[_, _, _, _, _] => false case node => node.inputs.size != 1 } def isTree = findTreeViolation() == None def infoString = { s"""Slave Name = ${name} |Slave Address = ${address} |supports = ${supports.infoString} | |""".stripMargin } def v1copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, 
regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { new TLSlaveParameters( setName = setName, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = emits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: Option[String] = setName, address: Seq[AddressSet] = address, regionType: RegionType.T = regionType, executable: Boolean = executable, fifoId: Option[Int] = fifoId, supports: TLMasterToSlaveTransferSizes = supports, emits: TLSlaveToMasterTransferSizes = emits, alwaysGrantsT: Boolean = alwaysGrantsT, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } @deprecated("Use v1copy instead of copy","") def copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { v1copy( address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supportsAcquireT = supportsAcquireT, supportsAcquireB = supportsAcquireB, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } } object TLSlaveParameters { def v1( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, 
supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = { new TLSlaveParameters( setName = None, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLSlaveToMasterTransferSizes.unknownEmits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2( address: Seq[AddressSet], nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Seq(), name: Option[String] = None, regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, fifoId: Option[Int] = None, supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports, emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits, alwaysGrantsT: Boolean = false, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } } object TLManagerParameters { @deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","") def apply( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = TLSlaveParameters.v1( address, resources, regionType, executable, nodePath, supportsAcquireT, supportsAcquireB, supportsArithmetic, supportsLogical, supportsGet, supportsPutFull, supportsPutPartial, supportsHint, mayDenyGet, mayDenyPut, alwaysGrantsT, fifoId, ) } case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int]) { def members = Seq(a, b, c, d) members.collect { case Some(beatBytes) => require (isPow2(beatBytes), "Data channel width must be a power of 2") } } object TLChannelBeatBytes{ def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes( Some(beatBytes), Some(beatBytes), Some(beatBytes), Some(beatBytes)) def apply(): TLChannelBeatBytes = TLChannelBeatBytes( None, None, None, None) } class TLSlavePortParameters private( val slaves: Seq[TLSlaveParameters], val channelBytes: TLChannelBeatBytes, val endSinkId: Int, val minLatency: Int, val responseFields: 
Seq[BundleFieldBase], val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct { def sortedSlaves = slaves.sortBy(_.sortedAddress.head) override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters] override def productPrefix = "TLSlavePortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => slaves case 1 => channelBytes case 2 => endSinkId case 3 => minLatency case 4 => responseFields case 5 => requestKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!slaves.isEmpty, "Slave ports must have slaves") require (endSinkId >= 0, "Sink ids cannot be negative") require (minLatency >= 0, "Minimum required latency cannot be negative") // Using this API implies you cannot handle mixed-width busses def beatBytes = { channelBytes.members.foreach { width => require (width.isDefined && width == channelBytes.a) } channelBytes.a.get } // TODO this should be deprecated def managers = slaves def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = { val relevant = slaves.filter(m => policy(m)) relevant.foreach { m => require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ") } } // Bounds on required sizes def maxAddress = slaves.map(_.maxAddress).max def maxTransfer = slaves.map(_.maxTransfer).max def mayDenyGet = slaves.exists(_.mayDenyGet) def mayDenyPut = slaves.exists(_.mayDenyPut) // Diplomatically determined operation sizes emitted by all outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _) // Operation Emitted by at least one outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _) val allSupportAcquireT = allSupportClaims.acquireT val allSupportAcquireB = allSupportClaims.acquireB val allSupportArithmetic = allSupportClaims.arithmetic val allSupportLogical = allSupportClaims.logical val allSupportGet = allSupportClaims.get val allSupportPutFull = allSupportClaims.putFull val allSupportPutPartial = allSupportClaims.putPartial val allSupportHint = allSupportClaims.hint // Operation supported by at least one outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _) val anySupportAcquireT = !anySupportClaims.acquireT.none val anySupportAcquireB = !anySupportClaims.acquireB.none val anySupportArithmetic = !anySupportClaims.arithmetic.none val anySupportLogical = !anySupportClaims.logical.none val anySupportGet = !anySupportClaims.get.none val anySupportPutFull = !anySupportClaims.putFull.none val anySupportPutPartial = !anySupportClaims.putPartial.none val anySupportHint = !anySupportClaims.hint.none // Supporting Acquire means being routable for GrantAck require ((endSinkId == 0) == !anySupportAcquireB) // These return Option[TLSlaveParameters] for your convenience def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address))) // The safe version will check the entire address def findSafe(address: UInt) = 
VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _))) // The fast version assumes the address is valid (you probably want fastProperty instead of this function) def findFast(address: UInt) = { val routingMask = AddressDecoder(slaves.map(_.address)) VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _))) } // Compute the simplest AddressSets that decide a key def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = { val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) => k -> vs.flatMap(_._2) } val reductionMask = AddressDecoder(groups.map(_._2)) groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) } } // Select a property def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D = Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) }) // Note: returns the actual fifoId + 1 or 0 if None def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U) def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B) // Does this Port manage this ID/address? def containsSafe(address: UInt) = findSafe(address).reduce(_ || _) private def addressHelper( // setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity safe: Boolean, // member filters out the sizes being checked based on the opcode being emitted or supported member: TLSlaveParameters => TransferSizes, address: UInt, lgSize: UInt, // range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity range: Option[TransferSizes]): Bool = { // trim reduces circuit complexity by intersecting checked sizes with the range argument def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x) // groupBy returns an unordered map, convert back to Seq and sort the result for determinism // groupByIntoSeq is turning slaves into trimmed membership sizes // We are grouping all the slaves by their transfer size where // if they support the trimmed size then // member is the type of transfer that you are looking for (What you are trying to filter on) // When you consider membership, you are trimming the sizes to only the ones that you care about // you are filtering the slaves based on both whether they support a particular opcode and the size // Grouping the slaves based on the actual transfer size range they support // intersecting the range and checking their membership // FOR SUPPORTCASES instead of returning the list of slaves, // you are returning a map from transfer size to the set of // address sets that are supported for that transfer size // find all the slaves that support a certain type of operation and then group their addresses by the supported size // for every size there could be multiple address ranges // safety is a trade off between checking between all possible addresses vs only the addresses // that are known to have supported sizes // the trade off is 'checking all addresses is a more expensive circuit but will always give you // the right answer even if you give it an illegal address' // the not safe version is a cheaper circuit but if you give it an illegal address then it might produce the wrong answer // fast presumes address legality // This groupByIntoSeq deterministically groups all 
address sets for which a given `member` transfer size applies. // In the resulting Map of cases, the keys are transfer sizes and the values are all address sets which emit or support that size. val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) => k -> vs.flatMap(_.address) } // safe produces a circuit that compares against all possible addresses, // whereas fast presumes that the address is legal but uses an efficient address decoder val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2)) // Simplified creates the most concise possible representation of each cases' address sets based on the mask. val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) } simplified.map { case (s, a) => // s is a size, you are checking for this size either the size of the operation is in s // We return an or-reduction of all the cases, checking whether any contains both the dynamic size and dynamic address on the wire. ((Some(s) == range).B || s.containsLg(lgSize)) && a.map(_.contains(address)).reduce(_||_) }.foldLeft(false.B)(_||_) } def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range) def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range) def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range) def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range) def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range) def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range) def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range) def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range) def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range) def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range) def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range) def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range) def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range) def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range) def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putPartial, address, lgSize, range) def 
supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range) def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range) def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range) def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range) def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range) def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range) def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range) def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range) def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption def isTree = !slaves.exists(!_.isTree) def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString def v1copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = managers, channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } def v2copy( slaves: Seq[TLSlaveParameters] = slaves, channelBytes: TLChannelBeatBytes = channelBytes, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = slaves, channelBytes = channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } @deprecated("Use v1copy instead of copy","") def copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { v1copy( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } object TLSlavePortParameters { def v1( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { new TLSlavePortParameters( slaves = managers, channelBytes = TLChannelBeatBytes(beatBytes), endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } } object TLManagerPortParameters { @deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","") def apply( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { TLSlavePortParameters.v1( managers, beatBytes, endSinkId, minLatency, 
responseFields, requestKeys) } } class TLMasterParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], val name: String, val visibility: Seq[AddressSet], val unusedRegionTypes: Set[RegionType.T], val executesOnly: Boolean, val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C. val supports: TLSlaveToMasterTransferSizes, val emits: TLMasterToSlaveTransferSizes, val neverReleasesData: Boolean, val sourceId: IdRange) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters] override def productPrefix = "TLMasterParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 10 def productElement(n: Int): Any = n match { case 0 => name case 1 => sourceId case 2 => resources case 3 => visibility case 4 => unusedRegionTypes case 5 => executesOnly case 6 => requestFifo case 7 => supports case 8 => emits case 9 => neverReleasesData case _ => throw new IndexOutOfBoundsException(n.toString) } require (!sourceId.isEmpty) require (!visibility.isEmpty) require (supports.putFull.contains(supports.putPartial)) // We only support these operations if we support Probe (ie: we're a cache) require (supports.probe.contains(supports.arithmetic)) require (supports.probe.contains(supports.logical)) require (supports.probe.contains(supports.get)) require (supports.probe.contains(supports.putFull)) require (supports.probe.contains(supports.putPartial)) require (supports.probe.contains(supports.hint)) visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } val maxTransfer = List( supports.probe.max, supports.arithmetic.max, supports.logical.max, supports.get.max, supports.putFull.max, supports.putPartial.max).max def infoString = { s"""Master Name = ${name} |visibility = ${visibility} |emits = ${emits.infoString} |sourceId = ${sourceId} | |""".stripMargin } def v1copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { new TLMasterParameters( nodePath = nodePath, resources = this.resources, name = name, visibility = visibility, unusedRegionTypes = this.unusedRegionTypes, executesOnly = this.executesOnly, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = this.emits, neverReleasesData = this.neverReleasesData, sourceId = sourceId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: String = name, visibility: Seq[AddressSet] = visibility, unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes, executesOnly: Boolean = executesOnly, requestFifo: Boolean = requestFifo, supports: TLSlaveToMasterTransferSizes = supports, emits: TLMasterToSlaveTransferSizes = emits, neverReleasesData: Boolean = neverReleasesData, sourceId: IdRange = sourceId) = { new TLMasterParameters( nodePath = nodePath, resources = resources, name = name, 
visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } @deprecated("Use v1copy instead of copy","") def copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { v1copy( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } object TLMasterParameters { def v1( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { new TLMasterParameters( nodePath = nodePath, resources = Nil, name = name, visibility = visibility, unusedRegionTypes = Set(), executesOnly = false, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData = false, sourceId = sourceId) } def v2( nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Nil, name: String, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), unusedRegionTypes: Set[RegionType.T] = Set(), executesOnly: Boolean = false, requestFifo: Boolean = false, supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports, emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData: Boolean = false, sourceId: IdRange = IdRange(0,1)) = { new TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } } object TLClientParameters { @deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","") def apply( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet.everything), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = 
TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { TLMasterParameters.v1( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } class TLMasterPortParameters private( val masters: Seq[TLMasterParameters], val channelBytes: TLChannelBeatBytes, val minLatency: Int, val echoFields: Seq[BundleFieldBase], val requestFields: Seq[BundleFieldBase], val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters] override def productPrefix = "TLMasterPortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => masters case 1 => channelBytes case 2 => minLatency case 3 => echoFields case 4 => requestFields case 5 => responseKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!masters.isEmpty) require (minLatency >= 0) def clients = masters // Require disjoint ranges for Ids IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) => require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}") } // Bounds on required sizes def endSourceId = masters.map(_.sourceId.end).max def maxTransfer = masters.map(_.maxTransfer).max // The unused sources < endSourceId def unusedSources: Seq[Int] = { val usedSources = masters.map(_.sourceId).sortBy(_.start) ((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) => end until start } } // Diplomatically determined operation sizes emitted by all inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = masters.map(_.emits).reduce( _ intersect _) // Diplomatically determined operation sizes Emitted by at least one inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all inward Masters // as opposed to supports* which generate circuitry to check which specific addresses val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _) val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _) val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _) val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _) val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _) val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _) val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _) // Diplomatically determined operation sizes supported by at least one master // as opposed to supports* which generate circuitry to check which specific addresses val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _) val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _) val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _) val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _) val anySupportPutFull = masters.map(!_.supports.putFull.none) .reduce(_ || _) val anySupportPutPartial = 
masters.map(!_.supports.putPartial.none).reduce(_ || _) val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _) // These return Option[TLMasterParameters] for your convenience def find(id: Int) = masters.find(_.sourceId.contains(id)) // Synthesizable lookup methods def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id))) def contains(id: UInt) = find(id).reduce(_ || _) def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B)) // Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = { val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _) // this if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version; // the case where there is only one group. if (allSame) member(masters(0)).containsLg(lgSize) else { // Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize Mux1H(find(id), masters.map(member(_).containsLg(lgSize))) } } // Check for support of a given operation at a specific id val supportsProbe = sourceIdHelper(_.supports.probe) _ val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _ val supportsLogical = sourceIdHelper(_.supports.logical) _ val supportsGet = sourceIdHelper(_.supports.get) _ val supportsPutFull = sourceIdHelper(_.supports.putFull) _ val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _ val supportsHint = sourceIdHelper(_.supports.hint) _ // TODO: Merge sourceIdHelper2 with sourceIdHelper private def sourceIdHelper2( member: TLMasterParameters => TransferSizes, sourceId: UInt, lgSize: UInt): Bool = { // Because sourceIds are uniquely owned by each master, we use them to group the // cases that have to be checked. 
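// Masters that share the same TransferSizes collapse into a single group below, so the
// generated circuit needs only one containsLg(lgSize) check per distinct TransferSizes
// value, qualified by a sourceId range test for that group.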
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) => k -> vs.map(_.sourceId) } emitCases.map { case (s, a) => (s.containsLg(lgSize)) && a.map(_.contains(sourceId)).reduce(_||_) }.foldLeft(false.B)(_||_) } // Check for emit of a given operation at a specific id def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize) def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize) def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize) def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize) def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize) def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize) def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize) def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize) def infoString = masters.map(_.infoString).mkString def v1copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = clients, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2copy( masters: Seq[TLMasterParameters] = masters, channelBytes: TLChannelBeatBytes = channelBytes, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } @deprecated("Use v1copy instead of copy","") def copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { v1copy( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLClientPortParameters { @deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","") def apply( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { TLMasterPortParameters.v1( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLMasterPortParameters { def v1( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = clients, channelBytes = TLChannelBeatBytes(), minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2( masters: Seq[TLMasterParameters], channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(), minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, 
minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } } case class TLBundleParameters( addressBits: Int, dataBits: Int, sourceBits: Int, sinkBits: Int, sizeBits: Int, echoFields: Seq[BundleFieldBase], requestFields: Seq[BundleFieldBase], responseFields: Seq[BundleFieldBase], hasBCE: Boolean) { // Chisel has issues with 0-width wires require (addressBits >= 1) require (dataBits >= 8) require (sourceBits >= 1) require (sinkBits >= 1) require (sizeBits >= 1) require (isPow2(dataBits)) echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") } val addrLoBits = log2Up(dataBits/8) // Used to uniquify bus IP names def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u") def union(x: TLBundleParameters) = TLBundleParameters( max(addressBits, x.addressBits), max(dataBits, x.dataBits), max(sourceBits, x.sourceBits), max(sinkBits, x.sinkBits), max(sizeBits, x.sizeBits), echoFields = BundleField.union(echoFields ++ x.echoFields), requestFields = BundleField.union(requestFields ++ x.requestFields), responseFields = BundleField.union(responseFields ++ x.responseFields), hasBCE || x.hasBCE) } object TLBundleParameters { val emptyBundleParams = TLBundleParameters( addressBits = 1, dataBits = 8, sourceBits = 1, sinkBits = 1, sizeBits = 1, echoFields = Nil, requestFields = Nil, responseFields = Nil, hasBCE = false) def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y)) def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) = new TLBundleParameters( addressBits = log2Up(slave.maxAddress + 1), dataBits = slave.beatBytes * 8, sourceBits = log2Up(master.endSourceId), sinkBits = log2Up(slave.endSinkId), sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1), echoFields = master.echoFields, requestFields = BundleField.accept(master.requestFields, slave.requestKeys), responseFields = BundleField.accept(slave.responseFields, master.responseKeys), hasBCE = master.anySupportProbe && slave.anySupportAcquireB) } case class TLEdgeParameters( master: TLMasterPortParameters, slave: TLSlavePortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { // legacy names: def manager = slave def client = master val maxTransfer = max(master.maxTransfer, slave.maxTransfer) val maxLgSize = log2Ceil(maxTransfer) // Sanity check the link... 
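// The link-wide maximum transfer must be able to fill at least one full data beat
// (slave.beatBytes); the error message below names the offending slaves when it cannot.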
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})") def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims) val bundle = TLBundleParameters(master, slave) def formatEdge = master.infoString + "\n" + slave.infoString } case class TLCreditedDelay( a: CreditedDelay, b: CreditedDelay, c: CreditedDelay, d: CreditedDelay, e: CreditedDelay) { def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay( a = a + that.a, b = b + that.b, c = c + that.c, d = d + that.d, e = e + that.e) override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})" } object TLCreditedDelay { def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay) } case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString} case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val delay = client.delay + manager.delay val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters) case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base)) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } // To be unified, devices must agree on all of these terms case class ManagerUnificationKey( resources: Seq[Resource], regionType: RegionType.T, executable: Boolean, supportsAcquireT: TransferSizes, supportsAcquireB: TransferSizes, supportsArithmetic: TransferSizes, supportsLogical: TransferSizes, supportsGet: TransferSizes, supportsPutFull: TransferSizes, supportsPutPartial: TransferSizes, supportsHint: TransferSizes) object ManagerUnificationKey { def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey( resources = x.resources, regionType = x.regionType, executable = x.executable, supportsAcquireT = x.supportsAcquireT, supportsAcquireB = x.supportsAcquireB, supportsArithmetic = x.supportsArithmetic, supportsLogical = x.supportsLogical, supportsGet = x.supportsGet, supportsPutFull = x.supportsPutFull, supportsPutPartial = x.supportsPutPartial, supportsHint = x.supportsHint) } object 
ManagerUnification { def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = { slaves.groupBy(ManagerUnificationKey.apply).values.map { seq => val agree = seq.forall(_.fifoId == seq.head.fifoId) seq(0).v1copy( address = AddressSet.unify(seq.flatMap(_.address)), fifoId = if (agree) seq(0).fifoId else None) }.toList } } case class TLBufferParams( a: BufferParams = BufferParams.none, b: BufferParams = BufferParams.none, c: BufferParams = BufferParams.none, d: BufferParams = BufferParams.none, e: BufferParams = BufferParams.none ) extends DirectedBuffers[TLBufferParams] { def copyIn(x: BufferParams) = this.copy(b = x, d = x) def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x) def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x) } /** Pretty printing of TL source id maps */ class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] { private val tlDigits = String.valueOf(tl.endSourceId-1).length() protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s" private val sorted = tl.masters.sortBy(_.sourceId) val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c => TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo) } } case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean) extends IdMapEntry { val from = tlId val to = tlId val maxTransactionsInFlight = Some(tlId.size) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? 
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => 
a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? 
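// Writes (PutFullData, PutPartialData, ArithmeticData, LogicalData) always need T;
// Get never does; a Hint needs T only for PREFETCH_WRITE; and an Acquire needs T
// exactly when the requested growth ends in T (NtoT or BtoT, but not NtoB).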
def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size 
:= lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = 
Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address 
:= toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def 
Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := 
mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
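The message constructors above all follow the same shape: they return either a fully populated channel bundle, or a (legal, bundle) pair whose Bool records whether the connected managers can accept the request. Below is a minimal, purely illustrative sketch of that pattern; the object and helper names are invented here, while edge.Get, edge.hasData and edge.firstlast are the methods defined in Edges.scala above.

import chisel3._
import freechips.rocketchip.tilelink._

// Illustrative only: a helper that drives one Get on channel A and watches channel D
// for the matching AccessAckData. Everything except the TLEdgeOut/TLBundle calls is
// a made-up name used for the sake of the example.
object TLEdgeUsageSketch {
  def driveSimpleGet(edge: TLEdgeOut, tl: TLBundle, addr: UInt, lgSize: UInt): Unit = {
    val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = lgSize)
    tl.a.valid := legal          // only assert valid when the managers support the op
    tl.a.bits  := get
    tl.d.ready := true.B         // this sketch always sinks responses

    val (_, dLast, _) = edge.firstlast(tl.d)
    when (tl.d.fire && dLast && edge.hasData(tl.d.bits)) {
      // final beat of the AccessAckData has arrived
    }
  }
}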
module TLMonitor_36( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [5:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [15:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [127:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_b_ready, // @[Monitor.scala:20:14] input io_in_b_valid, // @[Monitor.scala:20:14] input [1:0] io_in_b_bits_param, // @[Monitor.scala:20:14] input [31:0] io_in_b_bits_address, // @[Monitor.scala:20:14] input io_in_c_ready, // @[Monitor.scala:20:14] input io_in_c_valid, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_size, // @[Monitor.scala:20:14] input [5:0] io_in_c_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14] input [127:0] io_in_c_bits_data, // @[Monitor.scala:20:14] input io_in_c_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [5:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [127:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt, // @[Monitor.scala:20:14] input io_in_e_valid, // @[Monitor.scala:20:14] input [3:0] io_in_e_bits_sink // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [5:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [15:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [127:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_b_ready_0 = io_in_b_ready; // @[Monitor.scala:36:7] wire io_in_b_valid_0 = io_in_b_valid; // @[Monitor.scala:36:7] wire [1:0] io_in_b_bits_param_0 = io_in_b_bits_param; // @[Monitor.scala:36:7] wire [31:0] io_in_b_bits_address_0 = io_in_b_bits_address; // @[Monitor.scala:36:7] wire io_in_c_ready_0 = io_in_c_ready; // @[Monitor.scala:36:7] wire io_in_c_valid_0 = io_in_c_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_c_bits_opcode_0 = io_in_c_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_c_bits_param_0 = io_in_c_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_c_bits_size_0 = 
io_in_c_bits_size; // @[Monitor.scala:36:7] wire [5:0] io_in_c_bits_source_0 = io_in_c_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_c_bits_address_0 = io_in_c_bits_address; // @[Monitor.scala:36:7] wire [127:0] io_in_c_bits_data_0 = io_in_c_bits_data; // @[Monitor.scala:36:7] wire io_in_c_bits_corrupt_0 = io_in_c_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [5:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [127:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_e_valid_0 = io_in_e_valid; // @[Monitor.scala:36:7] wire [3:0] io_in_e_bits_sink_0 = io_in_e_bits_sink; // @[Monitor.scala:36:7] wire io_in_e_ready = 1'h1; // @[Monitor.scala:36:7] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_40 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_42 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_46 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_48 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_52 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_54 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_58 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_60 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_64 = 1'h1; // @[Parameters.scala:56:32] wire mask_sub_sub_sub_sub_0_1_1 = 1'h1; // @[Misc.scala:206:21] wire mask_sub_sub_sub_0_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_sub_sub_1_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_sub_size_1 = 1'h1; // @[Misc.scala:209:26] wire mask_sub_sub_0_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_sub_1_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_sub_2_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_sub_3_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_0_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_1_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_2_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_3_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_4_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_5_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_6_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_7_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_size_1 = 1'h1; // @[Misc.scala:209:26] wire mask_acc_16 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_17 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_18 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_19 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_20 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_21 = 1'h1; // 
@[Misc.scala:215:29] wire mask_acc_22 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_23 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_24 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_25 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_26 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_27 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_28 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_29 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_30 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_31 = 1'h1; // @[Misc.scala:215:29] wire _legal_source_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _legal_source_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _legal_source_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _legal_source_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _legal_source_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _legal_source_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _legal_source_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _legal_source_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _legal_source_T_26 = 1'h1; // @[Parameters.scala:54:32] wire _legal_source_T_27 = 1'h1; // @[Parameters.scala:56:32] wire _legal_source_T_28 = 1'h1; // @[Parameters.scala:54:67] wire _legal_source_T_29 = 1'h1; // @[Parameters.scala:57:20] wire _legal_source_T_30 = 1'h1; // @[Parameters.scala:56:48] wire _legal_source_WIRE_5 = 1'h1; // @[Parameters.scala:1138:31] wire legal_source = 1'h1; // @[Monitor.scala:168:113] wire _source_ok_T_77 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_79 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_83 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_85 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_89 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_91 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_95 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_97 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_101 = 1'h1; // @[Parameters.scala:56:32] wire _b_first_beats1_opdata_T = 1'h1; // @[Edges.scala:97:37] wire _b_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire b_first_last = 1'h1; // @[Edges.scala:232:33] wire [5:0] io_in_b_bits_source = 6'h20; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_55 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _uncommonBits_T_56 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _uncommonBits_T_57 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _uncommonBits_T_58 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _uncommonBits_T_59 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _legal_source_uncommonBits_T = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _legal_source_uncommonBits_T_1 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _legal_source_uncommonBits_T_2 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _legal_source_uncommonBits_T_3 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _legal_source_uncommonBits_T_4 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _legal_source_T_37 = 6'h20; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_43 = 6'h20; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_44 = 6'h20; // @[Mux.scala:30:73] wire [5:0] _legal_source_WIRE_1 = 6'h20; // @[Mux.scala:30:73] wire [5:0] _uncommonBits_T_60 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _uncommonBits_T_61 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _uncommonBits_T_62 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _uncommonBits_T_63 = 6'h20; // @[Parameters.scala:52:29] wire [5:0] _uncommonBits_T_64 = 6'h20; // @[Parameters.scala:52:29] wire [2:0] io_in_b_bits_opcode = 3'h6; // 
@[Monitor.scala:36:7] wire [2:0] io_in_b_bits_size = 3'h6; // @[Monitor.scala:36:7] wire [15:0] io_in_b_bits_mask = 16'hFFFF; // @[Monitor.scala:36:7] wire [15:0] mask_1 = 16'hFFFF; // @[Misc.scala:222:10] wire [127:0] io_in_b_bits_data = 128'h0; // @[Monitor.scala:36:7] wire io_in_b_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire mask_sub_sub_sub_size_1 = 1'h0; // @[Misc.scala:209:26] wire _mask_sub_sub_sub_acc_T_2 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_sub_sub_acc_T_3 = 1'h0; // @[Misc.scala:215:38] wire mask_sub_size_1 = 1'h0; // @[Misc.scala:209:26] wire _mask_sub_acc_T_8 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_9 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_10 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_11 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_12 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_13 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_14 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_15 = 1'h0; // @[Misc.scala:215:38] wire _legal_source_T = 1'h0; // @[Parameters.scala:46:9] wire _legal_source_T_2 = 1'h0; // @[Parameters.scala:54:32] wire _legal_source_T_4 = 1'h0; // @[Parameters.scala:54:67] wire _legal_source_T_6 = 1'h0; // @[Parameters.scala:56:48] wire _legal_source_T_8 = 1'h0; // @[Parameters.scala:54:32] wire _legal_source_T_10 = 1'h0; // @[Parameters.scala:54:67] wire _legal_source_T_12 = 1'h0; // @[Parameters.scala:56:48] wire _legal_source_T_14 = 1'h0; // @[Parameters.scala:54:32] wire _legal_source_T_16 = 1'h0; // @[Parameters.scala:54:67] wire _legal_source_T_18 = 1'h0; // @[Parameters.scala:56:48] wire _legal_source_T_20 = 1'h0; // @[Parameters.scala:54:32] wire _legal_source_T_22 = 1'h0; // @[Parameters.scala:54:67] wire _legal_source_T_24 = 1'h0; // @[Parameters.scala:56:48] wire _legal_source_T_31 = 1'h0; // @[Parameters.scala:46:9] wire _legal_source_WIRE_0 = 1'h0; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_1_0 = 1'h0; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_2 = 1'h0; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_3 = 1'h0; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_4 = 1'h0; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_6 = 1'h0; // @[Parameters.scala:1138:31] wire _legal_source_T_33 = 1'h0; // @[Mux.scala:30:73] wire b_first_beats1_opdata = 1'h0; // @[Edges.scala:97:28] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // 
@[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [3:0] _mask_sizeOH_T_4 = 4'h4; // @[OneHot.scala:65:12] wire [3:0] _mask_sizeOH_T_5 = 4'h4; // @[OneHot.scala:65:27] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _legal_source_T_25 = 3'h4; // @[Parameters.scala:54:10] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] uncommonBits_59 = 3'h0; // @[Parameters.scala:52:56] wire [2:0] legal_source_uncommonBits_4 = 3'h0; // @[Parameters.scala:52:56] wire [2:0] _legal_source_T_34 = 3'h0; // @[Mux.scala:30:73] wire [2:0] uncommonBits_64 = 3'h0; // @[Parameters.scala:52:56] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [1:0] uncommonBits_55 = 2'h0; // @[Parameters.scala:52:56] wire [1:0] uncommonBits_56 = 2'h0; // @[Parameters.scala:52:56] wire [1:0] uncommonBits_57 = 2'h0; // @[Parameters.scala:52:56] wire [1:0] uncommonBits_58 = 2'h0; // @[Parameters.scala:52:56] wire [1:0] legal_source_uncommonBits = 2'h0; // @[Parameters.scala:52:56] wire [1:0] legal_source_uncommonBits_1 = 2'h0; // @[Parameters.scala:52:56] wire [1:0] legal_source_uncommonBits_2 = 2'h0; // @[Parameters.scala:52:56] wire [1:0] legal_source_uncommonBits_3 = 2'h0; // @[Parameters.scala:52:56] wire [1:0] uncommonBits_60 = 2'h0; // @[Parameters.scala:52:56] wire [1:0] uncommonBits_61 = 2'h0; // @[Parameters.scala:52:56] wire [1:0] uncommonBits_62 = 2'h0; // @[Parameters.scala:52:56] wire [1:0] uncommonBits_63 = 2'h0; // @[Parameters.scala:52:56] wire [1:0] b_first_beats1 = 2'h0; // @[Edges.scala:221:14] wire [1:0] b_first_count = 2'h0; // @[Edges.scala:234:25] wire [1:0] mask_lo_lo_lo_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_lo_lo_hi_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_lo_hi_lo_1 = 2'h3; // 
@[Misc.scala:222:10] wire [1:0] mask_lo_hi_hi_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo_lo_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo_hi_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi_hi_lo_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi_hi_hi_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] b_first_beats1_decode = 2'h3; // @[Edges.scala:220:59] wire [5:0] is_aligned_mask_1 = 6'h3F; // @[package.scala:243:46] wire [5:0] _b_first_beats1_decode_T_2 = 6'h3F; // @[package.scala:243:46] wire [5:0] _is_aligned_mask_T_3 = 6'h0; // @[package.scala:243:76] wire [5:0] _legal_source_T_38 = 6'h0; // @[Mux.scala:30:73] wire [5:0] _b_first_beats1_decode_T_1 = 6'h0; // @[package.scala:243:76] wire [12:0] _is_aligned_mask_T_2 = 13'hFC0; // @[package.scala:243:71] wire [12:0] _b_first_beats1_decode_T = 13'hFC0; // @[package.scala:243:71] wire [4:0] _legal_source_T_32 = 5'h0; // @[Mux.scala:30:73] wire [4:0] _legal_source_T_39 = 5'h0; // @[Mux.scala:30:73] wire [4:0] _legal_source_T_40 = 5'h0; // @[Mux.scala:30:73] wire [4:0] _legal_source_T_41 = 5'h0; // @[Mux.scala:30:73] wire [4:0] _legal_source_T_42 = 5'h0; // @[Mux.scala:30:73] wire [3:0] _legal_source_T_35 = 4'h0; // @[Mux.scala:30:73] wire [3:0] _legal_source_T_36 = 4'h0; // @[Mux.scala:30:73] wire [3:0] _legal_source_T_1 = 4'h8; // @[Parameters.scala:54:10] wire [3:0] _legal_source_T_7 = 4'h8; // @[Parameters.scala:54:10] wire [3:0] _legal_source_T_13 = 4'h8; // @[Parameters.scala:54:10] wire [3:0] _legal_source_T_19 = 4'h8; // @[Parameters.scala:54:10] wire [7:0] mask_lo_1 = 8'hFF; // @[Misc.scala:222:10] wire [7:0] mask_hi_1 = 8'hFF; // @[Misc.scala:222:10] wire [3:0] mask_lo_lo_1 = 4'hF; // @[Misc.scala:222:10] wire [3:0] mask_lo_hi_1 = 4'hF; // @[Misc.scala:222:10] wire [3:0] mask_hi_lo_1 = 4'hF; // @[Misc.scala:222:10] wire [3:0] mask_hi_hi_1 = 4'hF; // @[Misc.scala:222:10] wire [3:0] mask_sizeOH_1 = 4'h5; // @[Misc.scala:202:81] wire [1:0] mask_sizeOH_shiftAmount_1 = 2'h2; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_3 = 4'h6; // @[Misc.scala:202:34] wire [5:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] 
_uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_10 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_11 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] 
_source_ok_uncommonBits_T_12 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_13 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_14 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_65 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_66 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_67 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_68 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_69 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_70 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_71 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_72 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_73 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_74 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_75 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_76 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_77 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_78 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_79 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_80 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_81 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_82 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_83 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_84 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_85 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_86 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_87 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_88 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_89 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_1 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_7 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_13 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_19 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // 
@[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire [2:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] _source_ok_T_25 = io_in_a_bits_source_0[5:3]; // @[Monitor.scala:36:7] wire _source_ok_T_26 = _source_ok_T_25 == 3'h4; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_29 = source_ok_uncommonBits_4 < 3'h5; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_30 = _source_ok_T_28 & _source_ok_T_29; // @[Parameters.scala:54:67, :56:48, :57:20] wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31] wire _source_ok_T_31 = io_in_a_bits_source_0 == 6'h28; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_31; // @[Parameters.scala:1138:31] wire _source_ok_T_32 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_34 = _source_ok_T_33 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_35 = _source_ok_T_34 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_36 = _source_ok_T_35 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_36 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T = {26'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}] wire [3:0] _mask_sizeOH_T = {1'h0, io_in_a_bits_size_0}; // @[Misc.scala:202:34] wire [1:0] 
mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [3:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1; // @[OneHot.scala:65:{12,27}] wire [3:0] mask_sizeOH = {_mask_sizeOH_T_2[3:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_sub_0_1 = io_in_a_bits_size_0[2]; // @[Misc.scala:206:21] wire mask_sub_sub_sub_size = mask_sizeOH[3]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_sub_bit = io_in_a_bits_address_0[3]; // @[Misc.scala:210:26] wire mask_sub_sub_sub_1_2 = mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_sub_nbit = ~mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_sub_0_2 = mask_sub_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_sub_acc_T = mask_sub_sub_sub_size & mask_sub_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_sub_0_1 = mask_sub_sub_sub_sub_0_1 | _mask_sub_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_sub_acc_T_1 = mask_sub_sub_sub_size & mask_sub_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_sub_1_1 = mask_sub_sub_sub_sub_0_1 | _mask_sub_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_sub_0_2 & mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_sub_1_2 = mask_sub_sub_sub_0_2 & mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_sub_2_2 = mask_sub_sub_sub_1_2 & mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T_2 = mask_sub_sub_size & mask_sub_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_2_1 = mask_sub_sub_sub_1_1 | _mask_sub_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_sub_3_2 = mask_sub_sub_sub_1_2 & mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_sub_acc_T_3 = mask_sub_sub_size & mask_sub_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_3_1 = mask_sub_sub_sub_1_1 | _mask_sub_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // 
@[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_sub_4_2 = mask_sub_sub_2_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_4 = mask_sub_size & mask_sub_4_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_4_1 = mask_sub_sub_2_1 | _mask_sub_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_sub_5_2 = mask_sub_sub_2_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_5 = mask_sub_size & mask_sub_5_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_5_1 = mask_sub_sub_2_1 | _mask_sub_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_sub_6_2 = mask_sub_sub_3_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_6 = mask_sub_size & mask_sub_6_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_6_1 = mask_sub_sub_3_1 | _mask_sub_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_sub_7_2 = mask_sub_sub_3_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_7 = mask_sub_size & mask_sub_7_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_7_1 = mask_sub_sub_3_1 | _mask_sub_acc_T_7; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // 
@[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire mask_eq_8 = mask_sub_4_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_8 = mask_size & mask_eq_8; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_8 = mask_sub_4_1 | _mask_acc_T_8; // @[Misc.scala:215:{29,38}] wire mask_eq_9 = mask_sub_4_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_9 = mask_size & mask_eq_9; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_9 = mask_sub_4_1 | _mask_acc_T_9; // @[Misc.scala:215:{29,38}] wire mask_eq_10 = mask_sub_5_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_10 = mask_size & mask_eq_10; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_10 = mask_sub_5_1 | _mask_acc_T_10; // @[Misc.scala:215:{29,38}] wire mask_eq_11 = mask_sub_5_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_11 = mask_size & mask_eq_11; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_11 = mask_sub_5_1 | _mask_acc_T_11; // @[Misc.scala:215:{29,38}] wire mask_eq_12 = mask_sub_6_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_12 = mask_size & mask_eq_12; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_12 = mask_sub_6_1 | _mask_acc_T_12; // @[Misc.scala:215:{29,38}] wire mask_eq_13 = mask_sub_6_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_13 = mask_size & mask_eq_13; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_13 = mask_sub_6_1 | _mask_acc_T_13; // @[Misc.scala:215:{29,38}] wire mask_eq_14 = mask_sub_7_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_14 = mask_size & mask_eq_14; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_14 = mask_sub_7_1 | _mask_acc_T_14; // @[Misc.scala:215:{29,38}] wire mask_eq_15 = mask_sub_7_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_15 = mask_size & mask_eq_15; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_15 = mask_sub_7_1 | _mask_acc_T_15; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo_lo = {mask_lo_lo_hi, mask_lo_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_lo_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo_hi = {mask_lo_hi_hi, mask_lo_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo_lo = {mask_acc_9, mask_acc_8}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_lo_hi = {mask_acc_11, mask_acc_10}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi_lo = {mask_hi_lo_hi, mask_hi_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_hi_lo = {mask_acc_13, mask_acc_12}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi_hi = {mask_acc_15, mask_acc_14}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi_hi = {mask_hi_hi_hi, mask_hi_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [15:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = 
_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_4 = _uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_9 = _uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_14 = _uncommonBits_T_14[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_24 = _uncommonBits_T_24[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_29 = _uncommonBits_T_29[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_34 = _uncommonBits_T_34[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_39 = _uncommonBits_T_39[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // 
@[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_44 = _uncommonBits_T_44[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_49 = _uncommonBits_T_49[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_54 = _uncommonBits_T_54[2:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_37 = io_in_d_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_37; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_38 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_44 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_50 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_56 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire _source_ok_T_39 = _source_ok_T_38 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_41 = _source_ok_T_39; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_43 = _source_ok_T_41; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_43; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_45 = _source_ok_T_44 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_47 = _source_ok_T_45; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_49 = _source_ok_T_47; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_49; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_51 = _source_ok_T_50 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_53 = _source_ok_T_51; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_55 = _source_ok_T_53; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_55; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_57 = _source_ok_T_56 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_59 = _source_ok_T_57; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_61 = _source_ok_T_59; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_61; // @[Parameters.scala:1138:31] wire [2:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] _source_ok_T_62 = io_in_d_bits_source_0[5:3]; // @[Monitor.scala:36:7] wire _source_ok_T_63 = _source_ok_T_62 == 3'h4; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_65 = _source_ok_T_63; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_66 = source_ok_uncommonBits_9 < 3'h5; // 
@[Parameters.scala:52:56, :57:20] wire _source_ok_T_67 = _source_ok_T_65 & _source_ok_T_66; // @[Parameters.scala:54:67, :56:48, :57:20] wire _source_ok_WIRE_1_5 = _source_ok_T_67; // @[Parameters.scala:1138:31] wire _source_ok_T_68 = io_in_d_bits_source_0 == 6'h28; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_68; // @[Parameters.scala:1138:31] wire _source_ok_T_69 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_70 = _source_ok_T_69 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_71 = _source_ok_T_70 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_72 = _source_ok_T_71 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_73 = _source_ok_T_72 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_73 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire sink_ok = io_in_d_bits_sink_0[3:2] != 2'h3; // @[Monitor.scala:36:7, :309:31] wire [27:0] _GEN_0 = io_in_b_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T = {io_in_b_bits_address_0[31:28], _GEN_0}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_1 = {1'h0, _address_ok_T}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_2 = _address_ok_T_1 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_3 = _address_ok_T_2; // @[Parameters.scala:137:46] wire _address_ok_T_4 = _address_ok_T_3 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_0 = _address_ok_T_4; // @[Parameters.scala:612:40] wire [31:0] _address_ok_T_5 = io_in_b_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_6 = {1'h0, _address_ok_T_5}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_7 = _address_ok_T_6 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_8 = _address_ok_T_7; // @[Parameters.scala:137:46] wire _address_ok_T_9 = _address_ok_T_8 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1 = _address_ok_T_9; // @[Parameters.scala:612:40] wire address_ok = _address_ok_WIRE_0 | _address_ok_WIRE_1; // @[Parameters.scala:612:40, :636:64] wire [31:0] _is_aligned_T_1 = {26'h0, io_in_b_bits_address_0[5:0]}; // @[Monitor.scala:36:7] wire is_aligned_1 = _is_aligned_T_1 == 32'h0; // @[Edges.scala:21:{16,24}] wire mask_sub_sub_sub_bit_1 = io_in_b_bits_address_0[3]; // @[Misc.scala:210:26] wire mask_sub_sub_sub_1_2_1 = mask_sub_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_sub_nbit_1 = ~mask_sub_sub_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_sub_0_2_1 = mask_sub_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire mask_sub_sub_bit_1 = io_in_b_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_nbit_1 = ~mask_sub_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2_1 = mask_sub_sub_sub_0_2_1 & mask_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T_4 = mask_sub_sub_0_2_1; // @[Misc.scala:214:27, :215:38] wire mask_sub_sub_1_2_1 = mask_sub_sub_sub_0_2_1 & mask_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_sub_sub_acc_T_5 = mask_sub_sub_1_2_1; // @[Misc.scala:214:27, :215:38] wire mask_sub_sub_2_2_1 = mask_sub_sub_sub_1_2_1 & mask_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T_6 = mask_sub_sub_2_2_1; // @[Misc.scala:214:27, :215:38] wire mask_sub_sub_3_2_1 = 
mask_sub_sub_sub_1_2_1 & mask_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_sub_sub_acc_T_7 = mask_sub_sub_3_2_1; // @[Misc.scala:214:27, :215:38] wire mask_sub_bit_1 = io_in_b_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit_1 = ~mask_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2_1 = mask_sub_sub_0_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire mask_sub_1_2_1 = mask_sub_sub_0_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire mask_sub_2_2_1 = mask_sub_sub_1_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire mask_sub_3_2_1 = mask_sub_sub_1_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire mask_sub_4_2_1 = mask_sub_sub_2_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire mask_sub_5_2_1 = mask_sub_sub_2_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire mask_sub_6_2_1 = mask_sub_sub_3_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire mask_sub_7_2_1 = mask_sub_sub_3_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire mask_bit_1 = io_in_b_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit_1 = ~mask_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_eq_16 = mask_sub_0_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_16 = mask_eq_16; // @[Misc.scala:214:27, :215:38] wire mask_eq_17 = mask_sub_0_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_17 = mask_eq_17; // @[Misc.scala:214:27, :215:38] wire mask_eq_18 = mask_sub_1_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_18 = mask_eq_18; // @[Misc.scala:214:27, :215:38] wire mask_eq_19 = mask_sub_1_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_19 = mask_eq_19; // @[Misc.scala:214:27, :215:38] wire mask_eq_20 = mask_sub_2_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_20 = mask_eq_20; // @[Misc.scala:214:27, :215:38] wire mask_eq_21 = mask_sub_2_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_21 = mask_eq_21; // @[Misc.scala:214:27, :215:38] wire mask_eq_22 = mask_sub_3_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_22 = mask_eq_22; // @[Misc.scala:214:27, :215:38] wire mask_eq_23 = mask_sub_3_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_23 = mask_eq_23; // @[Misc.scala:214:27, :215:38] wire mask_eq_24 = mask_sub_4_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_24 = mask_eq_24; // @[Misc.scala:214:27, :215:38] wire mask_eq_25 = mask_sub_4_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_25 = mask_eq_25; // @[Misc.scala:214:27, :215:38] wire mask_eq_26 = mask_sub_5_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_26 = mask_eq_26; // @[Misc.scala:214:27, :215:38] wire mask_eq_27 = mask_sub_5_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_27 = mask_eq_27; // @[Misc.scala:214:27, :215:38] wire mask_eq_28 = mask_sub_6_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_28 = mask_eq_28; // @[Misc.scala:214:27, :215:38] wire mask_eq_29 = mask_sub_6_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_29 = mask_eq_29; // @[Misc.scala:214:27, :215:38] wire mask_eq_30 = mask_sub_7_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_30 = mask_eq_30; // @[Misc.scala:214:27, :215:38] wire mask_eq_31 = mask_sub_7_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_31 = mask_eq_31; // @[Misc.scala:214:27, :215:38] wire 
_source_ok_T_74 = io_in_c_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_0 = _source_ok_T_74; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_75 = io_in_c_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_81 = io_in_c_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_87 = io_in_c_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_93 = io_in_c_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire _source_ok_T_76 = _source_ok_T_75 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_78 = _source_ok_T_76; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_80 = _source_ok_T_78; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2_1 = _source_ok_T_80; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_82 = _source_ok_T_81 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_84 = _source_ok_T_82; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_86 = _source_ok_T_84; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2_2 = _source_ok_T_86; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_12 = _source_ok_uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_88 = _source_ok_T_87 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_90 = _source_ok_T_88; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_92 = _source_ok_T_90; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2_3 = _source_ok_T_92; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_13 = _source_ok_uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_94 = _source_ok_T_93 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_96 = _source_ok_T_94; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_98 = _source_ok_T_96; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2_4 = _source_ok_T_98; // @[Parameters.scala:1138:31] wire [2:0] source_ok_uncommonBits_14 = _source_ok_uncommonBits_T_14[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] _source_ok_T_99 = io_in_c_bits_source_0[5:3]; // @[Monitor.scala:36:7] wire _source_ok_T_100 = _source_ok_T_99 == 3'h4; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_102 = _source_ok_T_100; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_103 = source_ok_uncommonBits_14 < 3'h5; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_104 = _source_ok_T_102 & _source_ok_T_103; // @[Parameters.scala:54:67, :56:48, :57:20] wire _source_ok_WIRE_2_5 = _source_ok_T_104; // @[Parameters.scala:1138:31] wire _source_ok_T_105 = io_in_c_bits_source_0 == 6'h28; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_6 = _source_ok_T_105; // @[Parameters.scala:1138:31] wire _source_ok_T_106 = _source_ok_WIRE_2_0 | _source_ok_WIRE_2_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_107 = _source_ok_T_106 | _source_ok_WIRE_2_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_108 = _source_ok_T_107 | _source_ok_WIRE_2_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_109 = _source_ok_T_108 | _source_ok_WIRE_2_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_110 = _source_ok_T_109 | _source_ok_WIRE_2_5; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_2 = _source_ok_T_110 | _source_ok_WIRE_2_6; // @[Parameters.scala:1138:31, :1139:46] 
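// The block that follows repeats, for the TileLink C channel, the alignment and
// address-legality decode already emitted for the A channel above. As a hedged,
// illustrative Chisel-level sketch of the alignment test (identifier names below
// are assumptions for readability, not the generator's own):
//   val lowMask  = ~(("h3F".U << c.bits.size)(5, 0))          // low address bits that must be zero
//   val cAligned = (c.bits.address(5, 0) & lowMask) === 0.U   // naturally aligned to the transfer size
// The emitted wires _GEN_1, is_aligned_mask_2 and is_aligned_2 compute the same
// condition, and _address_ok_T_10.._19 check the address against the manager's
// address sets exactly as the A/B-channel address_ok logic does.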
wire [12:0] _GEN_1 = 13'h3F << io_in_c_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T_4; // @[package.scala:243:71] assign _is_aligned_mask_T_4 = _GEN_1; // @[package.scala:243:71] wire [12:0] _c_first_beats1_decode_T; // @[package.scala:243:71] assign _c_first_beats1_decode_T = _GEN_1; // @[package.scala:243:71] wire [12:0] _c_first_beats1_decode_T_3; // @[package.scala:243:71] assign _c_first_beats1_decode_T_3 = _GEN_1; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_5 = _is_aligned_mask_T_4[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask_2 = ~_is_aligned_mask_T_5; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T_2 = {26'h0, io_in_c_bits_address_0[5:0] & is_aligned_mask_2}; // @[package.scala:243:46] wire is_aligned_2 = _is_aligned_T_2 == 32'h0; // @[Edges.scala:21:{16,24}] wire [27:0] _GEN_2 = io_in_c_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_10 = {io_in_c_bits_address_0[31:28], _GEN_2}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_11 = {1'h0, _address_ok_T_10}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_12 = _address_ok_T_11 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_13 = _address_ok_T_12; // @[Parameters.scala:137:46] wire _address_ok_T_14 = _address_ok_T_13 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_0 = _address_ok_T_14; // @[Parameters.scala:612:40] wire [31:0] _address_ok_T_15 = io_in_c_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_16 = {1'h0, _address_ok_T_15}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_17 = _address_ok_T_16 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_18 = _address_ok_T_17; // @[Parameters.scala:137:46] wire _address_ok_T_19 = _address_ok_T_18 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_1 = _address_ok_T_19; // @[Parameters.scala:612:40] wire address_ok_1 = _address_ok_WIRE_1_0 | _address_ok_WIRE_1_1; // @[Parameters.scala:612:40, :636:64] wire [1:0] uncommonBits_65 = _uncommonBits_T_65[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_66 = _uncommonBits_T_66[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_67 = _uncommonBits_T_67[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_68 = _uncommonBits_T_68[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_69 = _uncommonBits_T_69[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_70 = _uncommonBits_T_70[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_71 = _uncommonBits_T_71[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_72 = _uncommonBits_T_72[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_73 = _uncommonBits_T_73[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_74 = _uncommonBits_T_74[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_75 = _uncommonBits_T_75[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_76 = _uncommonBits_T_76[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_77 = _uncommonBits_T_77[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_78 = _uncommonBits_T_78[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_79 = _uncommonBits_T_79[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_80 = _uncommonBits_T_80[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_81 = 
_uncommonBits_T_81[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_82 = _uncommonBits_T_82[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_83 = _uncommonBits_T_83[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_84 = _uncommonBits_T_84[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_85 = _uncommonBits_T_85[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_86 = _uncommonBits_T_86[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_87 = _uncommonBits_T_87[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_88 = _uncommonBits_T_88[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_89 = _uncommonBits_T_89[2:0]; // @[Parameters.scala:52:{29,56}] wire sink_ok_1 = io_in_e_bits_sink_0[3:2] != 2'h3; // @[Monitor.scala:36:7, :367:31] wire _T_2103 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_2103; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_2103; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [1:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:4]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [1:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 2'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [1:0] a_first_counter; // @[Edges.scala:229:27] wire [2:0] _a_first_counter1_T = {1'h0, a_first_counter} - 3'h1; // @[Edges.scala:229:27, :230:28] wire [1:0] a_first_counter1 = _a_first_counter1_T[1:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 2'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 2'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 2'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [1:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [1:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [1:0] _a_first_counter_T = a_first ? 
a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [5:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _T_2177 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_2177; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_2177; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_2177; // @[Decoupled.scala:51:35] wire _d_first_T_3; // @[Decoupled.scala:51:35] assign _d_first_T_3 = _T_2177; // @[Decoupled.scala:51:35] wire [12:0] _GEN_3 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_3; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_3; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_3; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_9; // @[package.scala:243:71] assign _d_first_beats1_decode_T_9 = _GEN_3; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [1:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:4]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_3 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [1:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 2'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [1:0] d_first_counter; // @[Edges.scala:229:27] wire [2:0] _d_first_counter1_T = {1'h0, d_first_counter} - 3'h1; // @[Edges.scala:229:27, :230:28] wire [1:0] d_first_counter1 = _d_first_counter1_T[1:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 2'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 2'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 2'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [1:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [1:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [1:0] _d_first_counter_T = d_first ? 
d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [5:0] source_1; // @[Monitor.scala:541:22] reg [3:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] wire _b_first_T = io_in_b_ready_0 & io_in_b_valid_0; // @[Decoupled.scala:51:35] wire b_first_done = _b_first_T; // @[Decoupled.scala:51:35] reg [1:0] b_first_counter; // @[Edges.scala:229:27] wire [2:0] _b_first_counter1_T = {1'h0, b_first_counter} - 3'h1; // @[Edges.scala:229:27, :230:28] wire [1:0] b_first_counter1 = _b_first_counter1_T[1:0]; // @[Edges.scala:230:28] wire b_first = b_first_counter == 2'h0; // @[Edges.scala:229:27, :231:25] wire _b_first_last_T = b_first_counter == 2'h1; // @[Edges.scala:229:27, :232:25] wire [1:0] _b_first_count_T = ~b_first_counter1; // @[Edges.scala:230:28, :234:27] wire [1:0] _b_first_counter_T = b_first ? 2'h0 : b_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [1:0] param_2; // @[Monitor.scala:411:22] reg [31:0] address_1; // @[Monitor.scala:414:22] wire _T_2174 = io_in_c_ready_0 & io_in_c_valid_0; // @[Decoupled.scala:51:35] wire _c_first_T; // @[Decoupled.scala:51:35] assign _c_first_T = _T_2174; // @[Decoupled.scala:51:35] wire _c_first_T_1; // @[Decoupled.scala:51:35] assign _c_first_T_1 = _T_2174; // @[Decoupled.scala:51:35] wire [5:0] _c_first_beats1_decode_T_1 = _c_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _c_first_beats1_decode_T_2 = ~_c_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [1:0] c_first_beats1_decode = _c_first_beats1_decode_T_2[5:4]; // @[package.scala:243:46] wire c_first_beats1_opdata = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire c_first_beats1_opdata_1 = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [1:0] c_first_beats1 = c_first_beats1_opdata ? c_first_beats1_decode : 2'h0; // @[Edges.scala:102:36, :220:59, :221:14] reg [1:0] c_first_counter; // @[Edges.scala:229:27] wire [2:0] _c_first_counter1_T = {1'h0, c_first_counter} - 3'h1; // @[Edges.scala:229:27, :230:28] wire [1:0] c_first_counter1 = _c_first_counter1_T[1:0]; // @[Edges.scala:230:28] wire c_first = c_first_counter == 2'h0; // @[Edges.scala:229:27, :231:25] wire _c_first_last_T = c_first_counter == 2'h1; // @[Edges.scala:229:27, :232:25] wire _c_first_last_T_1 = c_first_beats1 == 2'h0; // @[Edges.scala:221:14, :232:43] wire c_first_last = _c_first_last_T | _c_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire c_first_done = c_first_last & _c_first_T; // @[Decoupled.scala:51:35] wire [1:0] _c_first_count_T = ~c_first_counter1; // @[Edges.scala:230:28, :234:27] wire [1:0] c_first_count = c_first_beats1 & _c_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [1:0] _c_first_counter_T = c_first ? 
c_first_beats1 : c_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_3; // @[Monitor.scala:515:22] reg [2:0] param_3; // @[Monitor.scala:516:22] reg [2:0] size_3; // @[Monitor.scala:517:22] reg [5:0] source_3; // @[Monitor.scala:518:22] reg [31:0] address_2; // @[Monitor.scala:519:22] reg [40:0] inflight; // @[Monitor.scala:614:27] reg [163:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [163:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [1:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:4]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [1:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 2'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [1:0] a_first_counter_1; // @[Edges.scala:229:27] wire [2:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 3'h1; // @[Edges.scala:229:27, :230:28] wire [1:0] a_first_counter1_1 = _a_first_counter1_T_1[1:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 2'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 2'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 2'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [1:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [1:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [1:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [1:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:4]; // @[package.scala:243:46] wire [1:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 2'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [1:0] d_first_counter_1; // @[Edges.scala:229:27] wire [2:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 3'h1; // @[Edges.scala:229:27, :230:28] wire [1:0] d_first_counter1_1 = _d_first_counter1_T_1[1:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 2'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 2'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 2'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [1:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [1:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [1:0] _d_first_counter_T_1 = d_first_1 ? 
d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [40:0] a_set; // @[Monitor.scala:626:34] wire [40:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [163:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [163:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [8:0] _GEN_4 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [8:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_4; // @[Monitor.scala:637:69] wire [8:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_4; // @[Monitor.scala:637:69, :641:65] wire [8:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_4; // @[Monitor.scala:637:69, :680:101] wire [8:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_4; // @[Monitor.scala:637:69, :681:99] wire [8:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_4; // @[Monitor.scala:637:69, :749:69] wire [8:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_4; // @[Monitor.scala:637:69, :750:67] wire [8:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_4; // @[Monitor.scala:637:69, :790:101] wire [8:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_4; // @[Monitor.scala:637:69, :791:99] wire [163:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [163:0] _a_opcode_lookup_T_6 = {160'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [163:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[163:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [163:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [163:0] _a_size_lookup_T_6 = {160'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [163:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[163:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [63:0] _GEN_5 = 64'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [63:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [63:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_5; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[40:0] : 41'h0; // @[OneHot.scala:58:35] wire _T_2029 = _T_2103 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_2029 ? _a_set_T[40:0] : 41'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_2029 ? 
_a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_2029 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [8:0] _GEN_6 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [8:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_6; // @[Monitor.scala:659:79] wire [8:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_6; // @[Monitor.scala:659:79, :660:77] wire [514:0] _a_opcodes_set_T_1 = {511'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_2029 ? _a_opcodes_set_T_1[163:0] : 164'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [514:0] _a_sizes_set_T_1 = {511'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_2029 ? _a_sizes_set_T_1[163:0] : 164'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [40:0] d_clr; // @[Monitor.scala:664:34] wire [40:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [163:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [163:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_7 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_7; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_7; // @[Monitor.scala:673:46, :783:46] wire _T_2075 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [63:0] _GEN_8 = 64'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [63:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_8; // @[OneHot.scala:58:35] wire [63:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_8; // @[OneHot.scala:58:35] wire [63:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_8; // @[OneHot.scala:58:35] wire [63:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_8; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_2075 & ~d_release_ack ? _d_clr_wo_ready_T[40:0] : 41'h0; // @[OneHot.scala:58:35] wire _T_2044 = _T_2177 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_2044 ? _d_clr_T[40:0] : 41'h0; // @[OneHot.scala:58:35] wire [526:0] _d_opcodes_clr_T_5 = 527'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_2044 ? _d_opcodes_clr_T_5[163:0] : 164'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [526:0] _d_sizes_clr_T_5 = 527'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_2044 ? 
_d_sizes_clr_T_5[163:0] : 164'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [40:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [40:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [40:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [163:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [163:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [163:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [163:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [163:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [163:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [40:0] inflight_1; // @[Monitor.scala:726:35] reg [163:0] inflight_opcodes_1; // @[Monitor.scala:727:35] reg [163:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [5:0] _c_first_beats1_decode_T_4 = _c_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _c_first_beats1_decode_T_5 = ~_c_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [1:0] c_first_beats1_decode_1 = _c_first_beats1_decode_T_5[5:4]; // @[package.scala:243:46] wire [1:0] c_first_beats1_1 = c_first_beats1_opdata_1 ? c_first_beats1_decode_1 : 2'h0; // @[Edges.scala:102:36, :220:59, :221:14] reg [1:0] c_first_counter_1; // @[Edges.scala:229:27] wire [2:0] _c_first_counter1_T_1 = {1'h0, c_first_counter_1} - 3'h1; // @[Edges.scala:229:27, :230:28] wire [1:0] c_first_counter1_1 = _c_first_counter1_T_1[1:0]; // @[Edges.scala:230:28] wire c_first_1 = c_first_counter_1 == 2'h0; // @[Edges.scala:229:27, :231:25] wire _c_first_last_T_2 = c_first_counter_1 == 2'h1; // @[Edges.scala:229:27, :232:25] wire _c_first_last_T_3 = c_first_beats1_1 == 2'h0; // @[Edges.scala:221:14, :232:43] wire c_first_last_1 = _c_first_last_T_2 | _c_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire c_first_done_1 = c_first_last_1 & _c_first_T_1; // @[Decoupled.scala:51:35] wire [1:0] _c_first_count_T_1 = ~c_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [1:0] c_first_count_1 = c_first_beats1_1 & _c_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [1:0] _c_first_counter_T_1 = c_first_1 ? c_first_beats1_1 : c_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [1:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:4]; // @[package.scala:243:46] wire [1:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? 
d_first_beats1_decode_2 : 2'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [1:0] d_first_counter_2; // @[Edges.scala:229:27] wire [2:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 3'h1; // @[Edges.scala:229:27, :230:28] wire [1:0] d_first_counter1_2 = _d_first_counter1_T_2[1:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 2'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 2'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 2'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [1:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [1:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [1:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [40:0] c_set; // @[Monitor.scala:738:34] wire [40:0] c_set_wo_ready; // @[Monitor.scala:739:34] wire [163:0] c_opcodes_set; // @[Monitor.scala:740:34] wire [163:0] c_sizes_set; // @[Monitor.scala:741:34] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [163:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [163:0] _c_opcode_lookup_T_6 = {160'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [163:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[163:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [163:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [163:0] _c_size_lookup_T_6 = {160'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [163:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[163:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [3:0] c_opcodes_set_interm; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm; // @[Monitor.scala:755:40] wire _same_cycle_resp_T_3 = io_in_c_valid_0 & c_first_1; // @[Monitor.scala:36:7, :759:26, :795:44] wire _same_cycle_resp_T_4 = io_in_c_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _same_cycle_resp_T_5 = io_in_c_bits_opcode_0[1]; // @[Monitor.scala:36:7] wire [63:0] _GEN_9 = 64'h1 << io_in_c_bits_source_0; // @[OneHot.scala:58:35] wire [63:0] _c_set_wo_ready_T; // @[OneHot.scala:58:35] assign _c_set_wo_ready_T = _GEN_9; // @[OneHot.scala:58:35] wire [63:0] _c_set_T; // @[OneHot.scala:58:35] assign _c_set_T = _GEN_9; // @[OneHot.scala:58:35] assign c_set_wo_ready = _same_cycle_resp_T_3 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5 ? _c_set_wo_ready_T[40:0] : 41'h0; // @[OneHot.scala:58:35] wire _T_2116 = _T_2174 & c_first_1 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Decoupled.scala:51:35] assign c_set = _T_2116 ? _c_set_T[40:0] : 41'h0; // @[OneHot.scala:58:35] wire [3:0] _c_opcodes_set_interm_T = {io_in_c_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :765:53] wire [3:0] _c_opcodes_set_interm_T_1 = {_c_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:765:{53,61}] assign c_opcodes_set_interm = _T_2116 ? 
_c_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:754:40, :763:{25,36,70}, :765:{28,61}] wire [3:0] _c_sizes_set_interm_T = {io_in_c_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :766:51] wire [3:0] _c_sizes_set_interm_T_1 = {_c_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:766:{51,59}] assign c_sizes_set_interm = _T_2116 ? _c_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:755:40, :763:{25,36,70}, :766:{28,59}] wire [8:0] _GEN_10 = {1'h0, io_in_c_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :767:79] wire [8:0] _c_opcodes_set_T; // @[Monitor.scala:767:79] assign _c_opcodes_set_T = _GEN_10; // @[Monitor.scala:767:79] wire [8:0] _c_sizes_set_T; // @[Monitor.scala:768:77] assign _c_sizes_set_T = _GEN_10; // @[Monitor.scala:767:79, :768:77] wire [514:0] _c_opcodes_set_T_1 = {511'h0, c_opcodes_set_interm} << _c_opcodes_set_T; // @[Monitor.scala:659:54, :754:40, :767:{54,79}] assign c_opcodes_set = _T_2116 ? _c_opcodes_set_T_1[163:0] : 164'h0; // @[Monitor.scala:740:34, :763:{25,36,70}, :767:{28,54}] wire [514:0] _c_sizes_set_T_1 = {511'h0, c_sizes_set_interm} << _c_sizes_set_T; // @[Monitor.scala:659:54, :755:40, :768:{52,77}] assign c_sizes_set = _T_2116 ? _c_sizes_set_T_1[163:0] : 164'h0; // @[Monitor.scala:741:34, :763:{25,36,70}, :768:{28,52}] wire _c_probe_ack_T = io_in_c_bits_opcode_0 == 3'h4; // @[Monitor.scala:36:7, :772:47] wire _c_probe_ack_T_1 = io_in_c_bits_opcode_0 == 3'h5; // @[Monitor.scala:36:7, :772:95] wire c_probe_ack = _c_probe_ack_T | _c_probe_ack_T_1; // @[Monitor.scala:772:{47,71,95}] wire [40:0] d_clr_1; // @[Monitor.scala:774:34] wire [40:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [163:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [163:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_2147 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_2147 & d_release_ack_1 ? _d_clr_wo_ready_T_1[40:0] : 41'h0; // @[OneHot.scala:58:35] wire _T_2129 = _T_2177 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_2129 ? _d_clr_T_1[40:0] : 41'h0; // @[OneHot.scala:58:35] wire [526:0] _d_opcodes_clr_T_11 = 527'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_2129 ? _d_opcodes_clr_T_11[163:0] : 164'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [526:0] _d_sizes_clr_T_11 = 527'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_2129 ? 
_d_sizes_clr_T_11[163:0] : 164'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_6 = _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Edges.scala:68:{36,40,51}] wire _same_cycle_resp_T_7 = _same_cycle_resp_T_3 & _same_cycle_resp_T_6; // @[Monitor.scala:795:{44,55}] wire _same_cycle_resp_T_8 = io_in_c_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :795:113] wire same_cycle_resp_1 = _same_cycle_resp_T_7 & _same_cycle_resp_T_8; // @[Monitor.scala:795:{55,88,113}] wire [40:0] _inflight_T_3 = inflight_1 | c_set; // @[Monitor.scala:726:35, :738:34, :814:35] wire [40:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [40:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [163:0] _inflight_opcodes_T_3 = inflight_opcodes_1 | c_opcodes_set; // @[Monitor.scala:727:35, :740:34, :815:43] wire [163:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [163:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [163:0] _inflight_sizes_T_3 = inflight_sizes_1 | c_sizes_set; // @[Monitor.scala:728:35, :741:34, :816:41] wire [163:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [163:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27] wire [32:0] _watchdog_T_2 = {1'h0, watchdog_1} + 33'h1; // @[Monitor.scala:818:27, :823:26] wire [31:0] _watchdog_T_3 = _watchdog_T_2[31:0]; // @[Monitor.scala:823:26] reg [11:0] inflight_2; // @[Monitor.scala:828:27] wire [5:0] _d_first_beats1_decode_T_10 = _d_first_beats1_decode_T_9[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_11 = ~_d_first_beats1_decode_T_10; // @[package.scala:243:{46,76}] wire [1:0] d_first_beats1_decode_3 = _d_first_beats1_decode_T_11[5:4]; // @[package.scala:243:46] wire [1:0] d_first_beats1_3 = d_first_beats1_opdata_3 ? d_first_beats1_decode_3 : 2'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [1:0] d_first_counter_3; // @[Edges.scala:229:27] wire [2:0] _d_first_counter1_T_3 = {1'h0, d_first_counter_3} - 3'h1; // @[Edges.scala:229:27, :230:28] wire [1:0] d_first_counter1_3 = _d_first_counter1_T_3[1:0]; // @[Edges.scala:230:28] wire d_first_3 = d_first_counter_3 == 2'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_6 = d_first_counter_3 == 2'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_7 = d_first_beats1_3 == 2'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_3 = _d_first_last_T_6 | _d_first_last_T_7; // @[Edges.scala:232:{25,33,43}] wire d_first_done_3 = d_first_last_3 & _d_first_T_3; // @[Decoupled.scala:51:35] wire [1:0] _d_first_count_T_3 = ~d_first_counter1_3; // @[Edges.scala:230:28, :234:27] wire [1:0] d_first_count_3 = d_first_beats1_3 & _d_first_count_T_3; // @[Edges.scala:221:14, :234:{25,27}] wire [1:0] _d_first_counter_T_3 = d_first_3 ? d_first_beats1_3 : d_first_counter1_3; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] d_set; // @[Monitor.scala:833:25] wire _T_2183 = _T_2177 & d_first_3 & io_in_d_bits_opcode_0[2] & ~(io_in_d_bits_opcode_0[1]); // @[Decoupled.scala:51:35] wire [15:0] _d_set_T = 16'h1 << io_in_d_bits_sink_0; // @[OneHot.scala:58:35] assign d_set = _T_2183 ? 
_d_set_T[11:0] : 12'h0; // @[OneHot.scala:58:35] wire [11:0] e_clr; // @[Monitor.scala:839:25] wire [15:0] _e_clr_T = 16'h1 << io_in_e_bits_sink_0; // @[OneHot.scala:58:35] assign e_clr = io_in_e_valid_0 ? _e_clr_T[11:0] : 12'h0; // @[OneHot.scala:58:35]
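Aside: the monitor logic above tracks outstanding TileLink transactions with per-source one-hot set/clear vectors (the a_set / d_clr and c_set / d_clr_1 wires) that are OR-ed into and AND-NOT-ed out of the inflight registers. Below is a minimal, self-contained Chisel sketch of that pattern; the module name InflightTracker and its ports are illustrative only and are not part of the generated design.

import chisel3._
import chisel3.util._

// Hypothetical sketch: set a bit when an A-channel request fires, clear it when the
// matching D-channel response fires, mirroring the inflight / a_set / d_clr wires above.
class InflightTracker(nSources: Int) extends Module {
  val io = IO(new Bundle {
    val a_fire   = Input(Bool())
    val a_source = Input(UInt(log2Ceil(nSources).W))
    val d_fire   = Input(Bool())
    val d_source = Input(UInt(log2Ceil(nSources).W))
    val inflight = Output(UInt(nSources.W))
  })
  val inflight = RegInit(0.U(nSources.W))
  val a_set = Mux(io.a_fire, UIntToOH(io.a_source, nSources), 0.U(nSources.W))
  val d_clr = Mux(io.d_fire, UIntToOH(io.d_source, nSources), 0.U(nSources.W))
  inflight := (inflight | a_set) & ~d_clr
  io.inflight := inflight
}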
Generate the Verilog code corresponding to the following Chisel files. File IBuf.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.rocket import chisel3._ import chisel3.util.{Decoupled,log2Ceil,Cat,UIntToOH,Fill} import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.tile._ import freechips.rocketchip.util._ class Instruction(implicit val p: Parameters) extends ParameterizedBundle with HasCoreParameters { val xcpt0 = new FrontendExceptions // exceptions on first half of instruction val xcpt1 = new FrontendExceptions // exceptions on second half of instruction val replay = Bool() val rvc = Bool() val inst = new ExpandedInstruction val raw = UInt(32.W) require(coreInstBits == (if (usingCompressed) 16 else 32)) } class IBuf(implicit p: Parameters) extends CoreModule { val io = IO(new Bundle { val imem = Flipped(Decoupled(new FrontendResp)) val kill = Input(Bool()) val pc = Output(UInt(vaddrBitsExtended.W)) val btb_resp = Output(new BTBResp()) val inst = Vec(retireWidth, Decoupled(new Instruction)) }) // This module is meant to be more general, but it's not there yet require(decodeWidth == 1) val n = fetchWidth - 1 val nBufValid = if (n == 0) 0.U else RegInit(init=0.U(log2Ceil(fetchWidth).W)) val buf = Reg(chiselTypeOf(io.imem.bits)) val ibufBTBResp = Reg(new BTBResp) val pcWordMask = (coreInstBytes*fetchWidth-1).U(vaddrBitsExtended.W) val pcWordBits = io.imem.bits.pc.extract(log2Ceil(fetchWidth*coreInstBytes)-1, log2Ceil(coreInstBytes)) val nReady = WireDefault(0.U(log2Ceil(fetchWidth+1).W)) val nIC = Mux(io.imem.bits.btb.taken, io.imem.bits.btb.bridx +& 1.U, fetchWidth.U) - pcWordBits val nICReady = nReady - nBufValid val nValid = Mux(io.imem.valid, nIC, 0.U) + nBufValid io.imem.ready := io.inst(0).ready && nReady >= nBufValid && (nICReady >= nIC || n.U >= nIC - nICReady) if (n > 0) { when (io.inst(0).ready) { nBufValid := Mux(nReady >= nBufValid, 0.U, nBufValid - nReady) if (n > 1) when (nReady > 0.U && nReady < nBufValid) { val shiftedBuf = shiftInsnRight(buf.data(n*coreInstBits-1, coreInstBits), (nReady-1.U)(log2Ceil(n-1)-1,0)) buf.data := Cat(buf.data(n*coreInstBits-1, (n-1)*coreInstBits), shiftedBuf((n-1)*coreInstBits-1, 0)) buf.pc := buf.pc & ~pcWordMask | (buf.pc + (nReady << log2Ceil(coreInstBytes))) & pcWordMask } when (io.imem.valid && nReady >= nBufValid && nICReady < nIC && n.U >= nIC - nICReady) { val shamt = pcWordBits + nICReady nBufValid := nIC - nICReady buf := io.imem.bits buf.data := shiftInsnRight(io.imem.bits.data, shamt)(n*coreInstBits-1,0) buf.pc := io.imem.bits.pc & ~pcWordMask | (io.imem.bits.pc + (nICReady << log2Ceil(coreInstBytes))) & pcWordMask ibufBTBResp := io.imem.bits.btb } } when (io.kill) { nBufValid := 0.U } } val icShiftAmt = (fetchWidth.U + nBufValid - pcWordBits)(log2Ceil(fetchWidth), 0) val icData = shiftInsnLeft(Cat(io.imem.bits.data, Fill(fetchWidth, io.imem.bits.data(coreInstBits-1, 0))), icShiftAmt) .extract(3*fetchWidth*coreInstBits-1, 2*fetchWidth*coreInstBits) val icMask = (~0.U((fetchWidth*coreInstBits).W) << (nBufValid << log2Ceil(coreInstBits)))(fetchWidth*coreInstBits-1,0) val inst = icData & icMask | buf.data & ~icMask val valid = (UIntToOH(nValid) - 1.U)(fetchWidth-1, 0) val bufMask = UIntToOH(nBufValid) - 1.U val xcpt = (0 until bufMask.getWidth).map(i => Mux(bufMask(i), buf.xcpt, io.imem.bits.xcpt)) val buf_replay = Mux(buf.replay, bufMask, 0.U) val ic_replay = buf_replay | Mux(io.imem.bits.replay, valid & ~bufMask, 0.U) assert(!io.imem.valid || !io.imem.bits.btb.taken ||
io.imem.bits.btb.bridx >= pcWordBits) io.btb_resp := io.imem.bits.btb io.pc := Mux(nBufValid > 0.U, buf.pc, io.imem.bits.pc) expand(0, 0.U, inst) def expand(i: Int, j: UInt, curInst: UInt): Unit = if (i < retireWidth) { val exp = Module(new RVCExpander) exp.io.in := curInst io.inst(i).bits.inst := exp.io.out io.inst(i).bits.raw := curInst if (usingCompressed) { val replay = ic_replay(j) || (!exp.io.rvc && ic_replay(j+1.U)) val full_insn = exp.io.rvc || valid(j+1.U) || buf_replay(j) io.inst(i).valid := valid(j) && full_insn io.inst(i).bits.xcpt0 := xcpt(j) io.inst(i).bits.xcpt1 := Mux(exp.io.rvc, 0.U, xcpt(j+1.U).asUInt).asTypeOf(new FrontendExceptions) io.inst(i).bits.replay := replay io.inst(i).bits.rvc := exp.io.rvc when ((bufMask(j) && exp.io.rvc) || bufMask(j+1.U)) { io.btb_resp := ibufBTBResp } when (full_insn && ((i == 0).B || io.inst(i).ready)) { nReady := Mux(exp.io.rvc, j+1.U, j+2.U) } expand(i+1, Mux(exp.io.rvc, j+1.U, j+2.U), Mux(exp.io.rvc, curInst >> 16, curInst >> 32)) } else { when ((i == 0).B || io.inst(i).ready) { nReady := (i+1).U } io.inst(i).valid := valid(i) io.inst(i).bits.xcpt0 := xcpt(i) io.inst(i).bits.xcpt1 := 0.U.asTypeOf(new FrontendExceptions) io.inst(i).bits.replay := ic_replay(i) io.inst(i).bits.rvc := false.B expand(i+1, null, curInst >> 32) } } def shiftInsnLeft(in: UInt, dist: UInt) = { val r = in.getWidth/coreInstBits require(in.getWidth % coreInstBits == 0) val data = Cat(Fill((1 << (log2Ceil(r) + 1)) - r, in >> (r-1)*coreInstBits), in) data << (dist << log2Ceil(coreInstBits)) } def shiftInsnRight(in: UInt, dist: UInt) = { val r = in.getWidth/coreInstBits require(in.getWidth % coreInstBits == 0) val data = Cat(Fill((1 << (log2Ceil(r) + 1)) - r, in >> (r-1)*coreInstBits), in) data >> (dist << log2Ceil(coreInstBits)) } }
module IBuf( // @[IBuf.scala:21:7] input clock, // @[IBuf.scala:21:7] input reset, // @[IBuf.scala:21:7] output io_imem_ready, // @[IBuf.scala:22:14] input io_imem_valid, // @[IBuf.scala:22:14] input io_imem_bits_btb_taken, // @[IBuf.scala:22:14] input io_imem_bits_btb_bridx, // @[IBuf.scala:22:14] input [4:0] io_imem_bits_btb_entry, // @[IBuf.scala:22:14] input [7:0] io_imem_bits_btb_bht_history, // @[IBuf.scala:22:14] input [39:0] io_imem_bits_pc, // @[IBuf.scala:22:14] input [31:0] io_imem_bits_data, // @[IBuf.scala:22:14] input io_imem_bits_xcpt_pf_inst, // @[IBuf.scala:22:14] input io_imem_bits_xcpt_gf_inst, // @[IBuf.scala:22:14] input io_imem_bits_xcpt_ae_inst, // @[IBuf.scala:22:14] input io_imem_bits_replay, // @[IBuf.scala:22:14] input io_kill, // @[IBuf.scala:22:14] output [39:0] io_pc, // @[IBuf.scala:22:14] output [4:0] io_btb_resp_entry, // @[IBuf.scala:22:14] output [7:0] io_btb_resp_bht_history, // @[IBuf.scala:22:14] input io_inst_0_ready, // @[IBuf.scala:22:14] output io_inst_0_valid, // @[IBuf.scala:22:14] output io_inst_0_bits_xcpt0_pf_inst, // @[IBuf.scala:22:14] output io_inst_0_bits_xcpt0_gf_inst, // @[IBuf.scala:22:14] output io_inst_0_bits_xcpt0_ae_inst, // @[IBuf.scala:22:14] output io_inst_0_bits_xcpt1_pf_inst, // @[IBuf.scala:22:14] output io_inst_0_bits_xcpt1_gf_inst, // @[IBuf.scala:22:14] output io_inst_0_bits_xcpt1_ae_inst, // @[IBuf.scala:22:14] output io_inst_0_bits_replay, // @[IBuf.scala:22:14] output io_inst_0_bits_rvc, // @[IBuf.scala:22:14] output [31:0] io_inst_0_bits_inst_bits, // @[IBuf.scala:22:14] output [4:0] io_inst_0_bits_inst_rd, // @[IBuf.scala:22:14] output [4:0] io_inst_0_bits_inst_rs1, // @[IBuf.scala:22:14] output [4:0] io_inst_0_bits_inst_rs2, // @[IBuf.scala:22:14] output [4:0] io_inst_0_bits_inst_rs3, // @[IBuf.scala:22:14] output [31:0] io_inst_0_bits_raw // @[IBuf.scala:22:14] ); wire [1:0] nReady; // @[IBuf.scala:40:27, :102:{60,69}] wire _exp_io_rvc; // @[IBuf.scala:86:21] reg nBufValid; // @[IBuf.scala:34:47] reg [39:0] buf_pc; // @[IBuf.scala:35:16] reg [31:0] buf_data; // @[IBuf.scala:35:16] reg buf_xcpt_pf_inst; // @[IBuf.scala:35:16] reg buf_xcpt_gf_inst; // @[IBuf.scala:35:16] reg buf_xcpt_ae_inst; // @[IBuf.scala:35:16] reg buf_replay; // @[IBuf.scala:35:16] reg [4:0] ibufBTBResp_entry; // @[IBuf.scala:36:24] reg [7:0] ibufBTBResp_bht_history; // @[IBuf.scala:36:24] wire [1:0] _GEN = {1'h0, io_imem_bits_pc[1]}; // @[package.scala:163:13] wire [1:0] _nIC_T_2 = (io_imem_bits_btb_taken ? {1'h0, io_imem_bits_btb_bridx} + 2'h1 : 2'h2) - _GEN; // @[IBuf.scala:41:{16,64,86}] wire [1:0] _GEN_0 = {1'h0, nBufValid}; // @[IBuf.scala:34:47, :42:25] wire [1:0] _nICReady_T = nReady - _GEN_0; // @[IBuf.scala:40:27, :42:25, :102:{60,69}] wire _nBufValid_T = nReady >= _GEN_0; // @[IBuf.scala:40:27, :42:25, :44:47, :102:{60,69}] wire [1:0] _nBufValid_T_6 = _nIC_T_2 - _nICReady_T; // @[IBuf.scala:41:86, :42:25, :44:94] wire [190:0] _icData_T_4 = {63'h0, {2{{2{io_imem_bits_data[31:16]}}}}, io_imem_bits_data, {2{io_imem_bits_data[15:0]}}} << {185'h0, _GEN_0 - 2'h2 - _GEN, 4'h0}; // @[IBuf.scala:41:86, :42:25, :68:{34,46}, :69:{57,87}, :120:{24,58}, :121:10] wire [62:0] _icMask_T_2 = 63'hFFFFFFFF << {58'h0, nBufValid, 4'h0}; // @[IBuf.scala:34:47, :71:51] wire [31:0] inst = _icData_T_4[95:64] & _icMask_T_2[31:0] | buf_data & ~(_icMask_T_2[31:0]); // @[package.scala:163:13] wire [3:0] _valid_T = 4'h1 << (io_imem_valid ? 
_nIC_T_2 : 2'h0) + _GEN_0; // @[OneHot.scala:58:35] wire [1:0] _valid_T_1 = _valid_T[1:0] - 2'h1; // @[OneHot.scala:58:35] wire [1:0] _bufMask_T_1 = (2'h1 << _GEN_0) - 2'h1; // @[OneHot.scala:58:35] wire [1:0] buf_replay_0 = buf_replay ? _bufMask_T_1 : 2'h0; // @[IBuf.scala:35:16, :75:37, :77:23] wire [1:0] ic_replay = buf_replay_0 | (io_imem_bits_replay ? _valid_T_1 & ~_bufMask_T_1 : 2'h0); // @[IBuf.scala:74:33, :75:37, :77:23, :78:{30,35,63,65}]
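Aside: the `valid` and `bufMask` signals in IBuf above (and the `_valid_T_1` / `_bufMask_T_1` wires in the generated module) use the `UIntToOH(n) - 1.U` idiom to turn a count into a thermometer mask over the n low bits. A small sketch of just that idiom follows; the module name ThermometerMask is made up for illustration and does not appear in the source.

import chisel3._
import chisel3.util._

// Hypothetical helper: count = 3, width = 4  =>  UIntToOH(3) = b01000, minus 1 = b00111.
class ThermometerMask(width: Int) extends Module {
  val io = IO(new Bundle {
    val count = Input(UInt(log2Ceil(width + 1).W)) // 0 .. width entries valid
    val mask  = Output(UInt(width.W))
  })
  io.mask := (UIntToOH(io.count, width + 1) - 1.U)(width - 1, 0)
}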
Generate the Verilog code corresponding to the following Chisel files. File MulRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (ported from Verilog to Chisel by Andrew Waterman). Copyright 2019, 2020 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulFullRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val a = Input(new RawFloat(expWidth, sigWidth)) val b = Input(new RawFloat(expWidth, sigWidth)) val invalidExc = Output(Bool()) val rawOut = Output(new RawFloat(expWidth, sigWidth*2 - 1)) }) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ val notSigNaN_invalidExc = (io.a.isInf && io.b.isZero) || (io.a.isZero && io.b.isInf) val notNaN_isInfOut = io.a.isInf || io.b.isInf val notNaN_isZeroOut = io.a.isZero || io.b.isZero val notNaN_signOut = io.a.sign ^ io.b.sign val common_sExpOut = io.a.sExp + io.b.sExp - (1<<expWidth).S val common_sigOut = (io.a.sig * io.b.sig)(sigWidth*2 - 1, 0) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ io.invalidExc := isSigNaNRawFloat(io.a) || isSigNaNRawFloat(io.b) || notSigNaN_invalidExc io.rawOut.isInf := notNaN_isInfOut io.rawOut.isZero := notNaN_isZeroOut io.rawOut.sExp := common_sExpOut io.rawOut.isNaN := io.a.isNaN || io.b.isNaN io.rawOut.sign := notNaN_signOut io.rawOut.sig := common_sigOut } class MulRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val a = Input(new RawFloat(expWidth, sigWidth)) val b = Input(new RawFloat(expWidth, sigWidth)) val invalidExc = Output(Bool()) val rawOut = Output(new RawFloat(expWidth, sigWidth + 2)) }) val mulFullRaw = Module(new MulFullRawFN(expWidth, sigWidth)) mulFullRaw.io.a := io.a mulFullRaw.io.b := io.b io.invalidExc := mulFullRaw.io.invalidExc io.rawOut := mulFullRaw.io.rawOut io.rawOut.sig := { val sig = mulFullRaw.io.rawOut.sig Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR) } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulRecFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val a = Input(UInt((expWidth + sigWidth + 1).W)) val b = Input(UInt((expWidth + sigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(Bool()) val out = Output(UInt((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(UInt(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val mulRawFN = Module(new MulRawFN(expWidth, sigWidth)) mulRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a) mulRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundRawFNToRecFN = Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0)) roundRawFNToRecFN.io.invalidExc := mulRawFN.io.invalidExc roundRawFNToRecFN.io.infiniteExc := false.B roundRawFNToRecFN.io.in := mulRawFN.io.rawOut roundRawFNToRecFN.io.roundingMode := io.roundingMode roundRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundRawFNToRecFN.io.out io.exceptionFlags := 
roundRawFNToRecFN.io.exceptionFlags }
module MulRawFN_23( // @[MulRecFN.scala:75:7] input io_a_isNaN, // @[MulRecFN.scala:77:16] input io_a_isInf, // @[MulRecFN.scala:77:16] input io_a_isZero, // @[MulRecFN.scala:77:16] input io_a_sign, // @[MulRecFN.scala:77:16] input [9:0] io_a_sExp, // @[MulRecFN.scala:77:16] input [24:0] io_a_sig, // @[MulRecFN.scala:77:16] input io_b_isNaN, // @[MulRecFN.scala:77:16] input io_b_isInf, // @[MulRecFN.scala:77:16] input io_b_isZero, // @[MulRecFN.scala:77:16] input io_b_sign, // @[MulRecFN.scala:77:16] input [9:0] io_b_sExp, // @[MulRecFN.scala:77:16] input [24:0] io_b_sig, // @[MulRecFN.scala:77:16] output io_invalidExc, // @[MulRecFN.scala:77:16] output io_rawOut_isNaN, // @[MulRecFN.scala:77:16] output io_rawOut_isInf, // @[MulRecFN.scala:77:16] output io_rawOut_isZero, // @[MulRecFN.scala:77:16] output io_rawOut_sign, // @[MulRecFN.scala:77:16] output [9:0] io_rawOut_sExp, // @[MulRecFN.scala:77:16] output [26:0] io_rawOut_sig // @[MulRecFN.scala:77:16] ); wire [47:0] _mulFullRaw_io_rawOut_sig; // @[MulRecFN.scala:84:28] wire io_a_isNaN_0 = io_a_isNaN; // @[MulRecFN.scala:75:7] wire io_a_isInf_0 = io_a_isInf; // @[MulRecFN.scala:75:7] wire io_a_isZero_0 = io_a_isZero; // @[MulRecFN.scala:75:7] wire io_a_sign_0 = io_a_sign; // @[MulRecFN.scala:75:7] wire [9:0] io_a_sExp_0 = io_a_sExp; // @[MulRecFN.scala:75:7] wire [24:0] io_a_sig_0 = io_a_sig; // @[MulRecFN.scala:75:7] wire io_b_isNaN_0 = io_b_isNaN; // @[MulRecFN.scala:75:7] wire io_b_isInf_0 = io_b_isInf; // @[MulRecFN.scala:75:7] wire io_b_isZero_0 = io_b_isZero; // @[MulRecFN.scala:75:7] wire io_b_sign_0 = io_b_sign; // @[MulRecFN.scala:75:7] wire [9:0] io_b_sExp_0 = io_b_sExp; // @[MulRecFN.scala:75:7] wire [24:0] io_b_sig_0 = io_b_sig; // @[MulRecFN.scala:75:7] wire [26:0] _io_rawOut_sig_T_3; // @[MulRecFN.scala:93:10] wire io_rawOut_isNaN_0; // @[MulRecFN.scala:75:7] wire io_rawOut_isInf_0; // @[MulRecFN.scala:75:7] wire io_rawOut_isZero_0; // @[MulRecFN.scala:75:7] wire io_rawOut_sign_0; // @[MulRecFN.scala:75:7] wire [9:0] io_rawOut_sExp_0; // @[MulRecFN.scala:75:7] wire [26:0] io_rawOut_sig_0; // @[MulRecFN.scala:75:7] wire io_invalidExc_0; // @[MulRecFN.scala:75:7] wire [25:0] _io_rawOut_sig_T = _mulFullRaw_io_rawOut_sig[47:22]; // @[MulRecFN.scala:84:28, :93:15] wire [21:0] _io_rawOut_sig_T_1 = _mulFullRaw_io_rawOut_sig[21:0]; // @[MulRecFN.scala:84:28, :93:37] wire _io_rawOut_sig_T_2 = |_io_rawOut_sig_T_1; // @[MulRecFN.scala:93:{37,55}] assign _io_rawOut_sig_T_3 = {_io_rawOut_sig_T, _io_rawOut_sig_T_2}; // @[MulRecFN.scala:93:{10,15,55}] assign io_rawOut_sig_0 = _io_rawOut_sig_T_3; // @[MulRecFN.scala:75:7, :93:10] MulFullRawFN_23 mulFullRaw ( // @[MulRecFN.scala:84:28] .io_a_isNaN (io_a_isNaN_0), // @[MulRecFN.scala:75:7] .io_a_isInf (io_a_isInf_0), // @[MulRecFN.scala:75:7] .io_a_isZero (io_a_isZero_0), // @[MulRecFN.scala:75:7] .io_a_sign (io_a_sign_0), // @[MulRecFN.scala:75:7] .io_a_sExp (io_a_sExp_0), // @[MulRecFN.scala:75:7] .io_a_sig (io_a_sig_0), // @[MulRecFN.scala:75:7] .io_b_isNaN (io_b_isNaN_0), // @[MulRecFN.scala:75:7] .io_b_isInf (io_b_isInf_0), // @[MulRecFN.scala:75:7] .io_b_isZero (io_b_isZero_0), // @[MulRecFN.scala:75:7] .io_b_sign (io_b_sign_0), // @[MulRecFN.scala:75:7] .io_b_sExp (io_b_sExp_0), // @[MulRecFN.scala:75:7] .io_b_sig (io_b_sig_0), // @[MulRecFN.scala:75:7] .io_invalidExc (io_invalidExc_0), .io_rawOut_isNaN (io_rawOut_isNaN_0), .io_rawOut_isInf (io_rawOut_isInf_0), .io_rawOut_isZero (io_rawOut_isZero_0), .io_rawOut_sign (io_rawOut_sign_0), .io_rawOut_sExp (io_rawOut_sExp_0), 
.io_rawOut_sig (_mulFullRaw_io_rawOut_sig) ); // @[MulRecFN.scala:84:28] assign io_invalidExc = io_invalidExc_0; // @[MulRecFN.scala:75:7] assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulRecFN.scala:75:7] assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulRecFN.scala:75:7] assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulRecFN.scala:75:7] assign io_rawOut_sign = io_rawOut_sign_0; // @[MulRecFN.scala:75:7] assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulRecFN.scala:75:7] assign io_rawOut_sig = io_rawOut_sig_0; // @[MulRecFN.scala:75:7] endmodule
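Aside: MulRawFN narrows the full 2*sigWidth-1 product from MulFullRawFN down to sigWidth+2 bits by keeping the upper bits and OR-reducing everything it drops into a single sticky bit (the `Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR)` expression in the Chisel, visible in the generated module as the [47:22] slice plus the |[21:0] reduction). Below is a standalone sketch of that sticky-compression step for arbitrary widths; the module name StickyCompress is illustrative, not part of the hardfloat sources.

import chisel3._
import chisel3.util._

// Hypothetical sketch: keep the top (fullWidth - dropBits) bits and fold the dropped
// low bits into one sticky bit, since rounding only needs to know whether any were set.
class StickyCompress(fullWidth: Int, dropBits: Int) extends Module {
  require(dropBits >= 1 && dropBits < fullWidth)
  val io = IO(new Bundle {
    val in  = Input(UInt(fullWidth.W))
    val out = Output(UInt((fullWidth - dropBits + 1).W))
  })
  io.out := Cat(io.in(fullWidth - 1, dropBits), io.in(dropBits - 1, 0).orR)
}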
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_51( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_68 io_out_sink_valid ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
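Aside: the GrayCounter object above is what makes the AsyncQueue widx/ridx pointers safe to move across clock domains: the binary count is converted to Gray code with gray = bin ^ (bin >> 1), so consecutive values differ in exactly one bit and a bit-by-bit synchronizer can never capture a torn, multi-bit-inconsistent index. A minimal sketch of just that conversion is below; the module name BinToGray is illustrative and not taken from the source.

import chisel3._

// Hypothetical sketch of the binary-to-Gray step inside GrayCounter:
// gray = bin ^ (bin >> 1), so incrementing bin changes exactly one gray bit.
class BinToGray(bits: Int) extends Module {
  val io = IO(new Bundle {
    val bin  = Input(UInt(bits.W))
    val gray = Output(UInt(bits.W))
  })
  io.gray := io.bin ^ (io.bin >> 1)
}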
Generate the Verilog code corresponding to the following Chisel files. File MSHR.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import freechips.rocketchip.tilelink._ import TLPermissions._ import TLMessages._ import MetaData._ import chisel3.PrintableHelper import chisel3.experimental.dataview._ class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val a = Valid(new SourceARequest(params)) val b = Valid(new SourceBRequest(params)) val c = Valid(new SourceCRequest(params)) val d = Valid(new SourceDRequest(params)) val e = Valid(new SourceERequest(params)) val x = Valid(new SourceXRequest(params)) val dir = Valid(new DirectoryWrite(params)) val reload = Bool() // get next request via allocate (if any) } class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val way = UInt(params.wayBits.W) val blockB = Bool() val nestB = Bool() val blockC = Bool() val nestC = Bool() } class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val b_toN = Bool() // nested Probes may unhit us val b_toB = Bool() // nested Probes may demote us val b_clr_dirty = Bool() // nested Probes clear dirty val c_set_dirty = Bool() // nested Releases MAY set dirty } sealed trait CacheState { val code = CacheState.index.U CacheState.index = CacheState.index + 1 } object CacheState { var index = 0 } case object S_INVALID extends CacheState case object S_BRANCH extends CacheState case object S_BRANCH_C extends CacheState case object S_TIP extends CacheState case object S_TIP_C extends CacheState case object S_TIP_CD extends CacheState case object S_TIP_D extends CacheState case object S_TRUNK_C extends CacheState case object S_TRUNK_CD extends CacheState class MSHR(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup val status = Valid(new MSHRStatus(params)) val schedule = Decoupled(new ScheduleRequest(params)) val sinkc = Flipped(Valid(new SinkCResponse(params))) val sinkd = Flipped(Valid(new SinkDResponse(params))) val sinke = Flipped(Valid(new SinkEResponse(params))) val nestedwb = Flipped(new NestedWriteback(params)) }) val request_valid = RegInit(false.B) val request = Reg(new FullRequest(params)) val meta_valid = RegInit(false.B) val meta = Reg(new DirectoryResult(params)) // Define which states are valid when (meta_valid) { when (meta.state === INVALID) { assert (!meta.clients.orR) assert (!meta.dirty) } when (meta.state === BRANCH) { assert (!meta.dirty) } when 
(meta.state === TRUNK) { assert (meta.clients.orR) assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one } when (meta.state === TIP) { // noop } } // Completed transitions (s_ = scheduled), (w_ = waiting) val s_rprobe = RegInit(true.B) // B val w_rprobeackfirst = RegInit(true.B) val w_rprobeacklast = RegInit(true.B) val s_release = RegInit(true.B) // CW w_rprobeackfirst val w_releaseack = RegInit(true.B) val s_pprobe = RegInit(true.B) // B val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1] val s_flush = RegInit(true.B) // X w_releaseack val w_grantfirst = RegInit(true.B) val w_grantlast = RegInit(true.B) val w_grant = RegInit(true.B) // first | last depending on wormhole val w_pprobeackfirst = RegInit(true.B) val w_pprobeacklast = RegInit(true.B) val w_pprobeack = RegInit(true.B) // first | last depending on wormhole val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*) val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD val s_execute = RegInit(true.B) // D w_pprobeack, w_grant val w_grantack = RegInit(true.B) val s_writeback = RegInit(true.B) // W w_* // [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall) // However, inB and outC are higher priority than outB, so s_release and s_pprobe // may be safely issued while blockB. Thus we must NOT try to schedule the // potentially stuck s_acquire with either of them (scheduler is all or none). // Meta-data that we discover underway val sink = Reg(UInt(params.outer.bundle.sinkBits.W)) val gotT = Reg(Bool()) val bad_grant = Reg(Bool()) val probes_done = Reg(UInt(params.clientBits.W)) val probes_toN = Reg(UInt(params.clientBits.W)) val probes_noT = Reg(Bool()) // When a nested transaction completes, update our meta data when (meta_valid && meta.state =/= INVALID && io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) { when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B } when (io.nestedwb.c_set_dirty) { meta.dirty := true.B } when (io.nestedwb.b_toB) { meta.state := BRANCH } when (io.nestedwb.b_toN) { meta.hit := false.B } } // Scheduler status io.status.valid := request_valid io.status.bits.set := request.set io.status.bits.tag := request.tag io.status.bits.way := meta.way io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst) io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst // The above rules ensure we will block and not nest an outer probe while still doing our // own inner probes. Thus every probe wakes exactly one MSHR. io.status.bits.blockC := !meta_valid io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst) // The w_grantfirst in nestC is necessary to deal with: // acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock // ... 
this is possible because the release+probe can be for same set, but different tag // We can only demand: block, nest, or queue assert (!io.status.bits.nestB || !io.status.bits.blockB) assert (!io.status.bits.nestC || !io.status.bits.blockC) // Scheduler requests val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe io.schedule.bits.b.valid := !s_rprobe || !s_pprobe io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst) io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant io.schedule.bits.e.valid := !s_grantack && w_grantfirst io.schedule.bits.x.valid := !s_flush && w_releaseack io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait) io.schedule.bits.reload := no_wait io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid || io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid || io.schedule.bits.dir.valid // Schedule completions when (io.schedule.ready) { s_rprobe := true.B when (w_rprobeackfirst) { s_release := true.B } s_pprobe := true.B when (s_release && s_pprobe) { s_acquire := true.B } when (w_releaseack) { s_flush := true.B } when (w_pprobeackfirst) { s_probeack := true.B } when (w_grantfirst) { s_grantack := true.B } when (w_pprobeack && w_grant) { s_execute := true.B } when (no_wait) { s_writeback := true.B } // Await the next operation when (no_wait) { request_valid := false.B meta_valid := false.B } } // Resulting meta-data val final_meta_writeback = WireInit(meta) val req_clientBit = params.clientBit(request.source) val req_needT = needT(request.opcode, request.param) val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm val meta_no_clients = !meta.clients.orR val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT) when (request.prio(2) && (!params.firstLevel).B) { // always a hit final_meta_writeback.dirty := meta.dirty || request.opcode(0) final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state) final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U) final_meta_writeback.hit := true.B // chained requests are hits } .elsewhen (request.control && params.control.B) { // request.prio(0) when (meta.hit) { final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := meta.clients & ~probes_toN } final_meta_writeback.hit := false.B } .otherwise { final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2) final_meta_writeback.state := Mux(req_needT, Mux(req_acquire, TRUNK, TIP), Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH), MuxLookup(meta.state, 0.U(2.W))(Seq( INVALID -> BRANCH, BRANCH -> BRANCH, TRUNK -> TIP, TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP))))) final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) | Mux(req_acquire, req_clientBit, 0.U) final_meta_writeback.tag := request.tag final_meta_writeback.hit := true.B } when (bad_grant) { when (meta.hit) { // upgrade failed (B -> T) assert (!meta_valid || meta.state === BRANCH) final_meta_writeback.hit := true.B final_meta_writeback.dirty := false.B final_meta_writeback.state := BRANCH final_meta_writeback.clients := meta.clients & ~probes_toN } .otherwise { // failed N -> (T or B) 
final_meta_writeback.hit := false.B final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := 0.U } } val invalid = Wire(new DirectoryEntry(params)) invalid.dirty := false.B invalid.state := INVALID invalid.clients := 0.U invalid.tag := 0.U // Just because a client says BtoT, by the time we process the request he may be N. // Therefore, we must consult our own meta-data state to confirm he owns the line still. val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR // The client asking us to act is proof they don't have permissions. val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U) io.schedule.bits.a.bits.tag := request.tag io.schedule.bits.a.bits.set := request.set io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB) io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U || !(request.opcode === PutFullData || request.opcode === AcquirePerm) io.schedule.bits.a.bits.source := 0.U io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB))) io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag) io.schedule.bits.b.bits.set := request.set io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release) io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN) io.schedule.bits.c.bits.source := 0.U io.schedule.bits.c.bits.tag := meta.tag io.schedule.bits.c.bits.set := request.set io.schedule.bits.c.bits.way := meta.way io.schedule.bits.c.bits.dirty := meta.dirty io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param, MuxLookup(request.param, request.param)(Seq( NtoB -> Mux(req_promoteT, NtoT, NtoB), BtoT -> Mux(honour_BtoT, BtoT, NtoT), NtoT -> NtoT))) io.schedule.bits.d.bits.sink := 0.U io.schedule.bits.d.bits.way := meta.way io.schedule.bits.d.bits.bad := bad_grant io.schedule.bits.e.bits.sink := sink io.schedule.bits.x.bits.fail := false.B io.schedule.bits.dir.bits.set := request.set io.schedule.bits.dir.bits.way := meta.way io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback)) // Coverage of state transitions def cacheState(entry: DirectoryEntry, hit: Bool) = { val out = WireDefault(0.U) val c = entry.clients.orR val d = entry.dirty switch (entry.state) { is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) } is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) } is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) } is (INVALID) { out := S_INVALID.code } } when (!hit) { out := S_INVALID.code } out } val p = !params.lastLevel // can be probed val c = !params.firstLevel // can be acquired val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read) val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist val f = params.control // flush control register exists val cfg = (p, c, m, r, f) val b = r || p // can reach branch state (via probe downgrade or read-only device) // The cache must be used for something or we would not be here require(c || m) val evict = cacheState(meta, !meta.hit) val before = cacheState(meta, meta.hit) val after = cacheState(final_meta_writeback, 
true.B) def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}") } else { assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}") } if (cover && f) { params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}") } else { assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}") } } def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}") } else { assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}") } } when ((!s_release && w_rprobeackfirst) && io.schedule.ready) { eviction(S_BRANCH, b) // MMIO read to read-only device eviction(S_BRANCH_C, b && c) // you need children to become C eviction(S_TIP, true) // MMIO read || clean release can lead to this state eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_D, true) // MMIO write || dirty release lead here eviction(S_TRUNK_C, c) // acquire for write eviction(S_TRUNK_CD, c) // dirty release then reacquire } when ((!s_writeback && no_wait) && io.schedule.ready) { transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches transition(S_INVALID, S_TIP, m) // MMIO read transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_INVALID, S_TIP_D, m) // MMIO write transition(S_INVALID, S_TRUNK_C, c) // acquire transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions) transition(S_BRANCH, S_BRANCH_C, b && c) // acquire transition(S_BRANCH, S_TIP, b && m) // prefetch write transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_TIP_D, b && m) // MMIO write transition(S_BRANCH, S_TRUNK_C, b && c) // acquire transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH_C, S_INVALID, b && c && p) transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional) transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_TIP, S_INVALID, p) transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write transition(S_TIP, 
S_TIP_CD, false) // acquire does not make us dirty immediately transition(S_TIP, S_TRUNK_C, c) // acquire transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately transition(S_TIP_C, S_INVALID, c && p) transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional) transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_TIP_C, S_TRUNK_C, c) // acquire transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty transition(S_TIP_D, S_INVALID, p) transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired transition(S_TIP_D, S_TRUNK_CD, c) // acquire transition(S_TIP_CD, S_INVALID, c && p) transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional) transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire transition(S_TIP_CD, S_TRUNK_CD, c) // acquire transition(S_TRUNK_C, S_INVALID, c && p) transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional) transition(S_TRUNK_C, S_TIP_C, c) // bounce shared transition(S_TRUNK_C, S_TIP_D, c) // dirty release transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce transition(S_TRUNK_CD, S_INVALID, c && p) transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TRUNK_CD, S_TIP_D, c) // dirty release transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire } // Handle response messages val probe_bit = params.clientBit(io.sinkc.bits.source) val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client) val probe_toN = isToN(io.sinkc.bits.param) if (!params.firstLevel) when (io.sinkc.valid) { params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B") params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B") // Caution: the probe matches us only in set. // We would never allow an outer probe to nest until both w_[rp]probeack complete, so // it is safe to just unguardedly update the probe FSM. 
probes_done := probes_done | probe_bit probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U) probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT w_rprobeackfirst := w_rprobeackfirst || last_probe w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last) w_pprobeackfirst := w_pprobeackfirst || last_probe w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last) // Allow wormhole routing from sinkC if the first request beat has offset 0 val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U) w_pprobeack := w_pprobeack || set_pprobeack params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data") params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data") // However, meta-data updates need to be done more cautiously when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!! } when (io.sinkd.valid) { when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) { sink := io.sinkd.bits.sink w_grantfirst := true.B w_grantlast := io.sinkd.bits.last // Record if we need to prevent taking ownership bad_grant := io.sinkd.bits.denied // Allow wormhole routing for requests whose first beat has offset 0 w_grant := request.offset === 0.U || io.sinkd.bits.last params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data") params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data") gotT := io.sinkd.bits.param === toT } .elsewhen (io.sinkd.bits.opcode === ReleaseAck) { w_releaseack := true.B } } when (io.sinke.valid) { w_grantack := true.B } // Bootstrap new requests val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits) val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits) val new_request = Mux(io.allocate.valid, allocate_as_full, request) val new_needT = needT(new_request.opcode, new_request.param) val new_clientBit = params.clientBit(new_request.source) val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U) val prior = cacheState(final_meta_writeback, true.B) def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}") } else { assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}") } } when (io.allocate.valid && io.allocate.bits.repeat) { bypass(S_INVALID, f || p) // Can lose permissions (probe/flush) bypass(S_BRANCH, b) // MMIO read to read-only device bypass(S_BRANCH_C, b && c) // you need children to become C bypass(S_TIP, true) // MMIO read || clean release can lead to this state bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_D, true) // MMIO write || dirty release lead here bypass(S_TRUNK_C, c) // acquire for write bypass(S_TRUNK_CD, c) // dirty release then reacquire } when (io.allocate.valid) { assert (!request_valid || (no_wait && io.schedule.fire)) request_valid := true.B request := io.allocate.bits } // Create execution plan when (io.directory.valid || (io.allocate.valid && 
io.allocate.bits.repeat)) { meta_valid := true.B meta := new_meta probes_done := 0.U probes_toN := 0.U probes_noT := false.B gotT := false.B bad_grant := false.B // These should already be either true or turning true // We clear them here explicitly to simplify the mux tree s_rprobe := true.B w_rprobeackfirst := true.B w_rprobeacklast := true.B s_release := true.B w_releaseack := true.B s_pprobe := true.B s_acquire := true.B s_flush := true.B w_grantfirst := true.B w_grantlast := true.B w_grant := true.B w_pprobeackfirst := true.B w_pprobeacklast := true.B w_pprobeack := true.B s_probeack := true.B s_grantack := true.B s_execute := true.B w_grantack := true.B s_writeback := true.B // For C channel requests (ie: Release[Data]) when (new_request.prio(2) && (!params.firstLevel).B) { s_execute := false.B // Do we need to go dirty? when (new_request.opcode(0) && !new_meta.dirty) { s_writeback := false.B } // Does our state change? when (isToB(new_request.param) && new_meta.state === TRUNK) { s_writeback := false.B } // Do our clients change? when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) { s_writeback := false.B } assert (new_meta.hit) } // For X channel requests (ie: flush) .elsewhen (new_request.control && params.control.B) { // new_request.prio(0) s_flush := false.B // Do we need to actually do something? when (new_meta.hit) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } } // For A channel requests .otherwise { // new_request.prio(0) && !new_request.control s_execute := false.B // Do we need an eviction? when (!new_meta.hit && new_meta.state =/= INVALID) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } // Do we need an acquire? when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) { s_acquire := false.B w_grantfirst := false.B w_grantlast := false.B w_grant := false.B s_grantack := false.B s_writeback := false.B } // Do we need a probe? when ((!params.firstLevel).B && (new_meta.hit && (new_needT || new_meta.state === TRUNK) && (new_meta.clients & ~new_skipProbe) =/= 0.U)) { s_pprobe := false.B w_pprobeackfirst := false.B w_pprobeacklast := false.B w_pprobeack := false.B s_writeback := false.B } // Do we need a grantack? when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) { w_grantack := false.B s_writeback := false.B } // Becomes dirty? when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) { s_writeback := false.B } } } } File Parameters.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property.cover import scala.math.{min,max} case class CacheParameters( level: Int, ways: Int, sets: Int, blockBytes: Int, beatBytes: Int, // inner hintsSkipProbe: Boolean) { require (ways > 0) require (sets > 0) require (blockBytes > 0 && isPow2(blockBytes)) require (beatBytes > 0 && isPow2(beatBytes)) require (blockBytes >= beatBytes) val blocks = ways * sets val sizeBytes = blocks * blockBytes val blockBeats = blockBytes/beatBytes } case class InclusiveCachePortParameters( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams) { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e)) } object InclusiveCachePortParameters { val none = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.none) val full = InclusiveCachePortParameters( a = BufferParams.default, b = BufferParams.default, c = BufferParams.default, d = BufferParams.default, e = BufferParams.default) // This removes feed-through paths from C=>A and A=>C val fullC = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.default, d = BufferParams.none, e = BufferParams.none) val flowAD = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.flow, e = BufferParams.none) val flowAE = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.flow) // For innerBuf: // SinkA: no restrictions, flows into scheduler+putbuffer // SourceB: no restrictions, flows out of scheduler // sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore // SourceD: no restrictions, flows out of bankedStore/regout // SinkE: no restrictions, flows into scheduler // // ... so while none is possible, you probably want at least flowAC to cut ready // from the scheduler delay and flowD to ease SourceD back-pressure // For outerBufer: // SourceA: must not be pipe, flows out of scheduler // SinkB: no restrictions, flows into scheduler // SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored // SinkD: no restrictions, flows into scheduler & bankedStore // SourceE: must not be pipe, flows out of scheduler // // ... 
AE take the channel ready into the scheduler, so you need at least flowAE } case class InclusiveCacheMicroParameters( writeBytes: Int, // backing store update granularity memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz) portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes dirReg: Boolean = false, innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE { require (writeBytes > 0 && isPow2(writeBytes)) require (memCycles > 0) require (portFactor >= 2) // for inner RMW and concurrent outer Relase + Grant } case class InclusiveCacheControlParameters( address: BigInt, beatBytes: Int, bankedControl: Boolean) case class InclusiveCacheParameters( cache: CacheParameters, micro: InclusiveCacheMicroParameters, control: Boolean, inner: TLEdgeIn, outer: TLEdgeOut)(implicit val p: Parameters) { require (cache.ways > 1) require (cache.sets > 1 && isPow2(cache.sets)) require (micro.writeBytes <= inner.manager.beatBytes) require (micro.writeBytes <= outer.manager.beatBytes) require (inner.manager.beatBytes <= cache.blockBytes) require (outer.manager.beatBytes <= cache.blockBytes) // Require that all cached address ranges have contiguous blocks outer.manager.managers.flatMap(_.address).foreach { a => require (a.alignment >= cache.blockBytes) } // If we are the first level cache, we do not need to support inner-BCE val firstLevel = !inner.client.clients.exists(_.supports.probe) // If we are the last level cache, we do not need to support outer-B val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED) require (lastLevel) // Provision enough resources to achieve full throughput with missing single-beat accesses val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro) val secondary = max(mshrs, micro.memCycles - mshrs) val putLists = micro.memCycles // allow every request to be single beat val putBeats = max(2*cache.blockBeats, micro.memCycles) val relLists = 2 val relBeats = relLists*cache.blockBeats val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address)) val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_)) def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] = if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail) val addressMapping = bitOffsets(pickMask) val addressBits = addressMapping.size // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}") val allClients = inner.client.clients.size val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size val clientBits = max(1, clientBitsRaw) val stateBits = 2 val wayBits = log2Ceil(cache.ways) val setBits = log2Ceil(cache.sets) val offsetBits = log2Ceil(cache.blockBytes) val tagBits = addressBits - setBits - offsetBits val putBits = log2Ceil(max(putLists, relLists)) require (tagBits > 0) require (offsetBits > 0) val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1 val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1 val innerMaskBits = inner.manager.beatBytes / micro.writeBytes val outerMaskBits = outer.manager.beatBytes / micro.writeBytes def clientBit(source: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse) } } def 
clientSource(bit: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U)) } } def parseAddress(x: UInt): (UInt, UInt, UInt) = { val offset = Cat(addressMapping.map(o => x(o,o)).reverse) val set = offset >> offsetBits val tag = set >> setBits (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0)) } def widen(x: UInt, width: Int): UInt = { val y = x | 0.U(width.W) assert (y >> width === 0.U) y(width-1, 0) } def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = { val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits)) val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) } addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) } Cat(bits.reverse) } def restoreAddress(expanded: UInt): UInt = { val missingBits = flatAddresses .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match .groupBy(_._1) .view .mapValues(_.map(_._2)) val muxMask = AddressDecoder(missingBits.values.toList) val mux = missingBits.toList.map { case (bits, addrs) => val widen = addrs.map(_.widen(~muxMask)) val matches = AddressSet .unify(widen.distinct) .map(_.contains(expanded)) .reduce(_ || _) (matches, bits.U) } expanded | Mux1H(mux) } def dirReg[T <: Data](x: T, en: Bool = true.B): T = { if (micro.dirReg) RegEnable(x, en) else x } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc) } object MetaData { val stateBits = 2 def INVALID: UInt = 0.U(stateBits.W) // way is empty def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch // Does a request need trunk? def needT(opcode: UInt, param: UInt): Bool = { !opcode(2) || (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) || ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB) } // Does a request prove the client need not be probed? 
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = { // Acquire(toB) and Get => is N, so no probe // Acquire(*toT) => is N or B, but need T, so no probe // Hint => could be anything, so a probe IS needed; if hintsSkipProbe is enabled, skip probing that same client // Put* => is N or B, so probe IS needed opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B) } def isToN(param: UInt): Bool = { param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN } def isToB(param: UInt): Bool = { param === TLPermissions.TtoB || param === TLPermissions.BtoB } } object InclusiveCacheParameters { val lfsrBits = 10 val L2ControlAddress = 0x2010000 val L2ControlSize = 0x1000 def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = { // We need 2-3 normal MSHRs to cover the Directory latency // To fully exploit memory bandwidth-delay-product, we need memCycles/blockBeats MSHRs max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats) } def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = // We need a dedicated MSHR for B+C each 2 + out_mshrs(cache, micro) } class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
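As a readability aid only (not part of the quoted files and not reflected in the generated Verilog that follows), the standalone Scala sketch below restates the out_mshrs / all_mshrs sizing arithmetic from InclusiveCacheParameters with concrete, assumed numbers: 64-byte blocks, 8-byte beats, and the default memCycles = 40.

// Standalone sketch of the MSHR-sizing arithmetic above; the parameter values are illustrative assumptions.
object MshrSizingSketch {
  // max(2 or 3, ceil(memCycles / blockBeats)), mirroring InclusiveCacheParameters.out_mshrs
  def outMshrs(blockBeats: Int, memCycles: Int, dirReg: Boolean): Int =
    math.max(if (dirReg) 3 else 2, (memCycles + blockBeats - 1) / blockBeats)

  // plus one dedicated MSHR each for the B and C channels, mirroring all_mshrs
  def allMshrs(blockBeats: Int, memCycles: Int, dirReg: Boolean): Int =
    2 + outMshrs(blockBeats, memCycles, dirReg)

  def main(args: Array[String]): Unit = {
    val blockBeats = 64 / 8                            // blockBytes / beatBytes = 8 beats per block
    println(outMshrs(blockBeats, 40, dirReg = false))  // ceil(40/8) = 5
    println(allMshrs(blockBeats, 40, dirReg = false))  // 5 + 2 = 7
  }
}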
module MSHR_2( // @[MSHR.scala:84:7] input clock, // @[MSHR.scala:84:7] input reset, // @[MSHR.scala:84:7] input io_allocate_valid, // @[MSHR.scala:86:14] input io_allocate_bits_prio_0, // @[MSHR.scala:86:14] input io_allocate_bits_prio_1, // @[MSHR.scala:86:14] input io_allocate_bits_prio_2, // @[MSHR.scala:86:14] input io_allocate_bits_control, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14] input [8:0] io_allocate_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14] input [10:0] io_allocate_bits_set, // @[MSHR.scala:86:14] input io_allocate_bits_repeat, // @[MSHR.scala:86:14] input io_directory_valid, // @[MSHR.scala:86:14] input io_directory_bits_dirty, // @[MSHR.scala:86:14] input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14] input io_directory_bits_clients, // @[MSHR.scala:86:14] input [8:0] io_directory_bits_tag, // @[MSHR.scala:86:14] input io_directory_bits_hit, // @[MSHR.scala:86:14] input [3:0] io_directory_bits_way, // @[MSHR.scala:86:14] output io_status_valid, // @[MSHR.scala:86:14] output [10:0] io_status_bits_set, // @[MSHR.scala:86:14] output [8:0] io_status_bits_tag, // @[MSHR.scala:86:14] output [3:0] io_status_bits_way, // @[MSHR.scala:86:14] output io_status_bits_blockB, // @[MSHR.scala:86:14] output io_status_bits_nestB, // @[MSHR.scala:86:14] output io_status_bits_blockC, // @[MSHR.scala:86:14] output io_status_bits_nestC, // @[MSHR.scala:86:14] input io_schedule_ready, // @[MSHR.scala:86:14] output io_schedule_valid, // @[MSHR.scala:86:14] output io_schedule_bits_a_valid, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14] output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14] output io_schedule_bits_b_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14] output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14] output io_schedule_bits_c_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14] output [3:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14] output io_schedule_bits_d_valid, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_0, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14] output [5:0] 
io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14] output [3:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14] output io_schedule_bits_e_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14] output io_schedule_bits_x_valid, // @[MSHR.scala:86:14] output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14] output [3:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14] output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14] output io_schedule_bits_reload, // @[MSHR.scala:86:14] input io_sinkc_valid, // @[MSHR.scala:86:14] input io_sinkc_bits_last, // @[MSHR.scala:86:14] input [10:0] io_sinkc_bits_set, // @[MSHR.scala:86:14] input [8:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14] input io_sinkc_bits_data, // @[MSHR.scala:86:14] input io_sinkd_valid, // @[MSHR.scala:86:14] input io_sinkd_bits_last, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14] input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14] input io_sinkd_bits_denied, // @[MSHR.scala:86:14] input io_sinke_valid, // @[MSHR.scala:86:14] input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14] input [10:0] io_nestedwb_set, // @[MSHR.scala:86:14] input [8:0] io_nestedwb_tag, // @[MSHR.scala:86:14] input io_nestedwb_b_toN, // @[MSHR.scala:86:14] input io_nestedwb_b_toB, // @[MSHR.scala:86:14] input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14] input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14] ); wire [8:0] final_meta_writeback_tag; // @[MSHR.scala:215:38] wire final_meta_writeback_clients; // @[MSHR.scala:215:38] wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38] wire final_meta_writeback_dirty; // @[MSHR.scala:215:38] wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_0_0 = io_allocate_bits_prio_0; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7] wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7] wire [8:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7] wire [10:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7] wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7] wire 
io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7] wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7] wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7] wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7] wire [8:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7] wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7] wire [3:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7] wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7] wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7] wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7] wire [10:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7] wire [8:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7] wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7] wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7] wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7] wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7] wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7] wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7] wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7] wire [10:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7] wire [8:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7] wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7] wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7] wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7] wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7] wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68] wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80] wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21] wire invalid_clients = 1'h0; // @[MSHR.scala:268:21] wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137] wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11] wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137] wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11] wire [8:0] invalid_tag = 9'h0; // @[MSHR.scala:268:21] wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21] wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70] wire allocate_as_full_prio_0 = io_allocate_bits_prio_0_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_opcode = 
io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34] wire [8:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34] wire [10:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34] wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40] wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93] wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28] wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39] wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105] wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55] wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91] wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41] wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41] wire [8:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41] wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51] wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64] wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41] wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41] wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57] wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41] wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43] wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40] wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66] wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41] wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41] wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41] wire [8:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41] wire no_wait; // @[MSHR.scala:183:83] wire [10:0] io_status_bits_set_0; // @[MSHR.scala:84:7] wire [8:0] io_status_bits_tag_0; // @[MSHR.scala:84:7] wire [3:0] io_status_bits_way_0; // @[MSHR.scala:84:7] wire io_status_bits_blockB_0; // @[MSHR.scala:84:7] wire io_status_bits_nestB_0; // @[MSHR.scala:84:7] wire io_status_bits_blockC_0; // @[MSHR.scala:84:7] wire io_status_bits_nestC_0; // @[MSHR.scala:84:7] wire io_status_valid_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7] wire [3:0] 
io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_0_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7] wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7] wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7] wire io_schedule_valid_0; // @[MSHR.scala:84:7] reg request_valid; // @[MSHR.scala:97:30] assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30] reg request_prio_0; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_0_0 = request_prio_0; // @[MSHR.scala:84:7, :98:20] reg request_prio_1; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20] reg request_prio_2; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20] reg request_control; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_opcode; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_param; // @[MSHR.scala:98:20] reg [2:0] request_size; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_source; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20] reg [8:0] request_tag; // @[MSHR.scala:98:20] assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_offset; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_put; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20] reg [10:0] request_set; // @[MSHR.scala:98:20] 
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] reg meta_valid; // @[MSHR.scala:99:27] reg meta_dirty; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17] reg [1:0] meta_state; // @[MSHR.scala:100:17] reg meta_clients; // @[MSHR.scala:100:17] wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39] wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27] wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27] reg [8:0] meta_tag; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17] reg meta_hit; // @[MSHR.scala:100:17] reg [3:0] meta_way; // @[MSHR.scala:100:17] assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] wire [3:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38] reg s_rprobe; // @[MSHR.scala:121:33] reg w_rprobeackfirst; // @[MSHR.scala:122:33] reg w_rprobeacklast; // @[MSHR.scala:123:33] reg s_release; // @[MSHR.scala:124:33] reg w_releaseack; // @[MSHR.scala:125:33] reg s_pprobe; // @[MSHR.scala:126:33] reg s_acquire; // @[MSHR.scala:127:33] reg s_flush; // @[MSHR.scala:128:33] reg w_grantfirst; // @[MSHR.scala:129:33] reg w_grantlast; // @[MSHR.scala:130:33] reg w_grant; // @[MSHR.scala:131:33] reg w_pprobeackfirst; // @[MSHR.scala:132:33] reg w_pprobeacklast; // @[MSHR.scala:133:33] reg w_pprobeack; // @[MSHR.scala:134:33] reg s_grantack; // @[MSHR.scala:136:33] reg s_execute; // @[MSHR.scala:137:33] reg w_grantack; // @[MSHR.scala:138:33] reg s_writeback; // @[MSHR.scala:139:33] reg [2:0] sink; // @[MSHR.scala:147:17] assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17] reg gotT; // @[MSHR.scala:148:17] reg bad_grant; // @[MSHR.scala:149:22] assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22] reg probes_done; // @[MSHR.scala:150:24] reg probes_toN; // @[MSHR.scala:151:23] reg probes_noT; // @[MSHR.scala:152:23] wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28] wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45] wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62] wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}] wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82] wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}] wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103] wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}] assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // 
@[MSHR.scala:168:{28,40,100}] assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40] wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39] wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}] wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}] wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96] assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}] assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93] assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28] assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28] wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43] wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64] wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}] wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85] wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}] assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}] assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39] wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33] wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}] wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}] assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}] assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83] wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31] wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}] assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}] assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55] wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31] wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44] assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}] assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41] wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32] wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}] assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}] assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64] wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31] wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}] assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // 
@[MSHR.scala:131:33, :187:{42,57}] assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57] wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31] assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}] assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43] wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31] assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}] assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40] wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34] wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}] wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70] wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}] assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}] assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66] wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49] wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}] wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}] wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49] wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}] assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}] assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105] wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71] wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71] wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71] wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire [8:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71] wire final_meta_writeback_hit; // @[MSHR.scala:215:38] wire req_clientBit = request_source == 6'h28; // @[Parameters.scala:46:9] wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12] wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12] wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _req_needT_T_2; // @[Parameters.scala:270:13] assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13] wire _excluded_client_T_6; // @[Parameters.scala:279:117] assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117] wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42] wire 
_req_needT_T_3; // @[Parameters.scala:270:42] assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42] wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11] assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11] wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42] wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _req_needT_T_6; // @[Parameters.scala:271:14] assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14] wire _req_acquire_T; // @[MSHR.scala:219:36] assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14] wire _excluded_client_T_1; // @[Parameters.scala:279:12] assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12] wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52] wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89] wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52] wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}] wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}] wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81] wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}] wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}] wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}] wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65] wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}] wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55] wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78] wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78] assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78] wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70] wire _evict_T_2; // @[MSHR.scala:317:26] assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _before_T_1; // @[MSHR.scala:317:26] assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}] wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 
2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}] wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43] assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43] wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}] wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75] wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}] wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}] wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}] wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54] wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}] wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45] wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}] wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}] wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40] wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40] assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40] wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65] assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65] wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41] wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}] wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72] wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}] wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70] wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70] wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53] assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53] wire _evict_T_1; // @[MSHR.scala:317:26] assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire _before_T; // @[MSHR.scala:317:26] assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70] wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70] wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55] wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? 
_final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70] wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70] wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66] wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}] wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}] wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40] assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30] wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54] wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}] assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21] assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21] assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36] assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? 
_final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36] wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9] wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}] wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}] wire _excluded_client_T = meta_hit & request_prio_0; // @[MSHR.scala:98:20, :100:17, :279:38] wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50] wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}] wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}] wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}] wire _excluded_client_T_9 = _excluded_client_T & _excluded_client_T_8; // @[Parameters.scala:279:106] wire excluded_client = _excluded_client_T_9 & req_clientBit; // @[Parameters.scala:46:9] wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56] wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70] assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}] wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51] wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55] wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52] wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}] wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}] assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38] assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91] wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42] wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70] wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}] assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}] assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41] wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42] assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? 
meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}] assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41] wire _io_schedule_bits_b_bits_clients_T = ~excluded_client; // @[MSHR.scala:279:28, :289:53] assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients & _io_schedule_bits_b_bits_clients_T; // @[MSHR.scala:100:17, :289:{51,53}] assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51] assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41] assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41] assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}] assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41] wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42] wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53] wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53] wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89] wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53] wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53] wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79] assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41] wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42] assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 
9'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}] assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41] wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32] wire [3:0] evict; // @[MSHR.scala:314:26] wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32] wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32] wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32] assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32] assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39] wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39] assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39] assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76] wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76] assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76] assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32] assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] before_0; // @[MSHR.scala:314:26] wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32] wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11] assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? 
{1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] after; // @[MSHR.scala:314:26] wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26] wire _after_T; // @[MSHR.scala:317:26] assign _after_T = _GEN_9; // @[MSHR.scala:317:26] wire _prior_T; // @[MSHR.scala:317:26] assign _prior_T = _GEN_9; // @[MSHR.scala:317:26] wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32] wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26] wire _after_T_1; // @[MSHR.scala:317:26] assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire _prior_T_1; // @[MSHR.scala:317:26] assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32] wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32] assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32] assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39] wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39] assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39] assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76] wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76] assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76] assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26] wire _after_T_3; // @[MSHR.scala:317:26] assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26] wire _prior_T_3; // @[MSHR.scala:317:26] assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26] assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? 
{1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire probe_bit = io_sinkc_bits_source_0 == 6'h28; // @[Parameters.scala:46:9] wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9] wire _last_probe_T; // @[MSHR.scala:459:33] assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33] wire _probes_done_T; // @[MSHR.scala:467:32] assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32] wire _last_probe_T_1 = ~excluded_client; // @[MSHR.scala:279:28, :289:53, :459:66] wire _last_probe_T_2 = meta_clients & _last_probe_T_1; // @[MSHR.scala:100:17, :459:{64,66}] wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}] wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11] wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43] wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}] wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75] wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}] wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9] wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}] wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53] wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}] wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42] wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55] wire _w_rprobeacklast_T; // @[MSHR.scala:471:55] assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55] wire _w_pprobeacklast_T; // @[MSHR.scala:473:55] assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55] wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}] wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42] wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}] wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77] wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}] wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}] wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32] wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33] wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}] wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35] wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40] wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [8:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_hit = _new_meta_T ? 
final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [3:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_request_prio_0 = io_allocate_valid_0 ? allocate_as_full_prio_0 : request_prio_0; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [8:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [10:0] new_request_set = io_allocate_valid_0 ? 
allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12] wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _new_needT_T_2; // @[Parameters.scala:270:13] assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13] wire _new_skipProbe_T_5; // @[Parameters.scala:279:117] assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117] wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42] wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _new_needT_T_6; // @[Parameters.scala:271:14] assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14] wire _new_skipProbe_T; // @[Parameters.scala:279:12] assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12] wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52] wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89] wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire new_clientBit = new_request_source == 6'h28; // @[Parameters.scala:46:9] wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50] wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}] wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}] wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}] wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9] wire [3:0] prior; // @[MSHR.scala:314:26] wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32] wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: package constellation.channel import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy._ import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.util._ import constellation.noc.{HasNoCParams} class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams { val io = IO(new Bundle { val in = Input(new Channel(cParam)) }) val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B })) for (i <- 0 until cParam.srcSpeedup) { val flit = io.in.flit(i) when (flit.valid) { when (flit.bits.head) { in_flight(flit.bits.virt_channel_id) := true.B assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken") } when (flit.bits.tail) { in_flight(flit.bits.virt_channel_id) := false.B } } val possibleFlows = cParam.possibleFlows when (flit.valid && flit.bits.head) { cParam match { case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) => assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } } } } File Types.scala: package constellation.routing import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Parameters} import constellation.noc.{HasNoCParams} import constellation.channel.{Flit} /** A representation for 1 specific virtual channel in wormhole routing * * @param src the source node * @param vc ID for the virtual channel * @param dst the destination node * @param n_vc the number of virtual channels */ // BEGIN: ChannelRoutingInfo case class ChannelRoutingInfo( src: Int, dst: Int, vc: Int, n_vc: Int ) { // END: ChannelRoutingInfo require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this") require (!(src == -1 && dst == -1), s"Illegal $this") require (vc < n_vc, s"Illegal $this") val isIngress = src == -1 val isEgress = dst == -1 } /** Represents the properties of a packet that are relevant for routing * ingressId and egressId uniquely identify a flow, but vnet and dst are used here * to simplify the implementation of routingrelations * * @param ingressId packet's source ingress point * @param egressId packet's destination egress point * @param vNet virtual subnetwork identifier * @param dst packet's destination node ID */ // BEGIN: FlowRoutingInfo case class FlowRoutingInfo( ingressId: Int, egressId: Int, vNetId: Int, ingressNode: Int, ingressNodeId: Int, egressNode: Int, egressNodeId: Int, fifo: Boolean ) { // END: FlowRoutingInfo def isFlow(f: FlowRoutingBundle): Bool = { (f.ingress_node === ingressNode.U && f.egress_node === egressNode.U && f.ingress_node_id === ingressNodeId.U && f.egress_node_id === egressNodeId.U) } def asLiteral(b: FlowRoutingBundle): BigInt = { Seq( (vNetId , b.vnet_id), (ingressNode , b.ingress_node), (ingressNodeId , b.ingress_node_id), (egressNode , b.egress_node), (egressNodeId , b.egress_node_id) ).foldLeft(0)((l, t) => { (l << t._2.getWidth) | t._1 }) } } class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams { // Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination // This simplifies the routing tables val vnet_id = UInt(log2Ceil(nVirtualNetworks).W) val ingress_node = UInt(log2Ceil(nNodes).W) val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W) val egress_node = UInt(log2Ceil(nNodes).W) val 
egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W) }
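For reference, the monitor's legal-flow assertion in Monitor.scala elaborates each FlowRoutingInfo (a Scala-level constant) through isFlow into equality checks against literals, and the set-membership test becomes a single OR-reduction. A minimal sketch of that elaboration, assuming a standalone checker (the LegalFlowChecker name, the FlowBundle stand-in, and its fixed field widths are illustrative, not from the source):

import chisel3._

// Hypothetical stand-in for FlowRoutingBundle with fixed widths (illustration only).
class FlowBundle extends Bundle {
  val ingress_node    = UInt(5.W)
  val ingress_node_id = UInt(2.W)
  val egress_node     = UInt(5.W)
  val egress_node_id  = UInt(2.W)
}

// flows: (ingressNode, ingressNodeId, egressNode, egressNodeId) tuples for the legal flows.
class LegalFlowChecker(flows: Seq[(Int, Int, Int, Int)]) extends Module {
  val io = IO(new Bundle {
    val flow  = Input(new FlowBundle)
    val legal = Output(Bool())
  })
  // Each tuple lowers to a constant comparison; the monitor asserts the OR of all of them.
  io.legal := VecInit(flows.map { case (in, inId, eg, egId) =>
    io.flow.ingress_node === in.U && io.flow.ingress_node_id === inId.U &&
    io.flow.egress_node === eg.U && io.flow.egress_node_id === egId.U
  }).asUInt.orR
}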
module NoCMonitor_30( // @[Monitor.scala:11:7]
  input       clock, // @[Monitor.scala:11:7]
  input       reset, // @[Monitor.scala:11:7]
  input       io_in_flit_0_valid, // @[Monitor.scala:12:14]
  input       io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
  input       io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
  input [4:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
  input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
  input [4:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
  input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
  input [2:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);

  reg in_flight_0; // @[Monitor.scala:16:26]
  reg in_flight_1; // @[Monitor.scala:16:26]
  reg in_flight_2; // @[Monitor.scala:16:26]
  reg in_flight_3; // @[Monitor.scala:16:26]
  reg in_flight_4; // @[Monitor.scala:16:26]
  reg in_flight_5; // @[Monitor.scala:16:26]
  reg in_flight_6; // @[Monitor.scala:16:26]
  reg in_flight_7; // @[Monitor.scala:16:26]
  wire _GEN = io_in_flit_0_bits_virt_channel_id == 3'h0; // @[Monitor.scala:21:46]
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
module MacUnit_22( // @[PE.scala:14:7]
  input         clock, // @[PE.scala:14:7]
  input         reset, // @[PE.scala:14:7]
  input  [7:0]  io_in_a, // @[PE.scala:16:14]
  input  [7:0]  io_in_b, // @[PE.scala:16:14]
  input  [19:0] io_in_c, // @[PE.scala:16:14]
  output [19:0] io_out_d // @[PE.scala:16:14]
);

  wire [7:0]  io_in_a_0 = io_in_a; // @[PE.scala:14:7]
  wire [7:0]  io_in_b_0 = io_in_b; // @[PE.scala:14:7]
  wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
  wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
  wire [19:0] io_out_d_0; // @[PE.scala:14:7]
  wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
  wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
  wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
  assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
  assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
  assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule
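The MacUnit_22 module above sign-extends the two 8-bit operands, forms the 16-bit product, sign-extends again to add the 20-bit accumulator input, and truncates the 21-bit sum back to 20 bits. A minimal sketch of the same arithmetic written directly against SInt (the SIntMac name and the explicit widths are assumptions for illustration; the actual generator reaches this result through the Arithmetic type class's mac and withWidthOf):

import chisel3._

class SIntMac extends Module {
  val io = IO(new Bundle {
    val in_a  = Input(SInt(8.W))
    val in_b  = Input(SInt(8.W))
    val in_c  = Input(SInt(20.W))
    val out_d = Output(SInt(20.W))
  })
  // The 8x8 multiply gives 16 bits; +& widens the sum to 21 bits; keeping the
  // low 20 bits matches the _io_out_d_T_1[19:0] truncation in the Verilog above.
  io.out_d := (io.in_a * io.in_b +& io.in_c)(19, 0).asSInt
}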
Generate the Verilog code corresponding to the following Chisel files.

File Tile.scala:

// See README.md for license details.

package gemmini

import chisel3._
import chisel3.util._
import Util._

/**
 * A Tile is a purely combinational 2D array of passThrough PEs.
 * a is passed through the Tile horizontally, while b, d, control, id, last, and valid are passed through vertically and driven onto the Tile's outputs.
 * @param inputType The data type of the A-matrix inputs
 * @param outputType The data type of the B, C, and D values
 * @param accType The data type of the PE accumulators
 * @param rows Number of rows of PEs in the Tile
 * @param columns Number of columns of PEs in the Tile
 */
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
  val io = IO(new Bundle {
    val in_a = Input(Vec(rows, inputType))
    val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
    val in_d = Input(Vec(columns, outputType))
    val in_control = Input(Vec(columns, new PEControl(accType)))
    val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
    val in_last = Input(Vec(columns, Bool()))

    val out_a = Output(Vec(rows, inputType))
    val out_c = Output(Vec(columns, outputType))
    val out_b = Output(Vec(columns, outputType))
    val out_control = Output(Vec(columns, new PEControl(accType)))
    val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
    val out_last = Output(Vec(columns, Bool()))

    val in_valid = Input(Vec(columns, Bool()))
    val out_valid = Output(Vec(columns, Bool()))

    val bad_dataflow = Output(Bool())
  })

  import ev._

  val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
  val tileT = tile.transpose

  // TODO: abstract hori/vert broadcast, all these connections look the same
  // Broadcast 'a' horizontally across the Tile
  for (r <- 0 until rows) {
    tile(r).foldLeft(io.in_a(r)) {
      case (in_a, pe) =>
        pe.io.in_a := in_a
        pe.io.out_a
    }
  }

  // Broadcast 'b' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_b(c)) {
      case (in_b, pe) =>
        pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
        pe.io.out_b
    }
  }

  // Broadcast 'd' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_d(c)) {
      case (in_d, pe) =>
        pe.io.in_d := in_d
        pe.io.out_c
    }
  }

  // Broadcast 'control' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_control(c)) {
      case (in_ctrl, pe) =>
        pe.io.in_control := in_ctrl
        pe.io.out_control
    }
  }

  // Broadcast 'valid' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_valid(c)) {
      case (v, pe) =>
        pe.io.in_valid := v
        pe.io.out_valid
    }
  }

  // Broadcast 'id' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_id(c)) {
      case (id, pe) =>
        pe.io.in_id := id
        pe.io.out_id
    }
  }

  // Broadcast 'last' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_last(c)) {
      case (last, pe) =>
        pe.io.in_last := last
        pe.io.out_last
    }
  }

  // Drive the Tile's bottom IO
  for (c <- 0 until columns) {
    io.out_c(c) := tile(rows-1)(c).io.out_c
    io.out_control(c) := tile(rows-1)(c).io.out_control
    io.out_id(c) := tile(rows-1)(c).io.out_id
    io.out_last(c) := tile(rows-1)(c).io.out_last
    io.out_valid(c) := tile(rows-1)(c).io.out_valid

    io.out_b(c) := {
      if (tree_reduction) {
        val prods = tileT(c).map(_.io.out_b)
        accumulateTree(prods :+ io.in_b(c))
      } else {
        tile(rows - 1)(c).io.out_b
      }
    }
  }

  io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)

  // Drive the Tile's right IO
  for (r <- 0 until rows) {
    io.out_a(r) := tile(r)(columns-1).io.out_a
  }
}
module Tile_186( // @[Tile.scala:16:7]
  input         clock, // @[Tile.scala:16:7]
  input         reset, // @[Tile.scala:16:7]
  input  [7:0]  io_in_a_0, // @[Tile.scala:17:14]
  input  [19:0] io_in_b_0, // @[Tile.scala:17:14]
  input  [19:0] io_in_d_0, // @[Tile.scala:17:14]
  input         io_in_control_0_dataflow, // @[Tile.scala:17:14]
  input         io_in_control_0_propagate, // @[Tile.scala:17:14]
  input  [4:0]  io_in_control_0_shift, // @[Tile.scala:17:14]
  input  [2:0]  io_in_id_0, // @[Tile.scala:17:14]
  input         io_in_last_0, // @[Tile.scala:17:14]
  output [7:0]  io_out_a_0, // @[Tile.scala:17:14]
  output [19:0] io_out_c_0, // @[Tile.scala:17:14]
  output [19:0] io_out_b_0, // @[Tile.scala:17:14]
  output        io_out_control_0_dataflow, // @[Tile.scala:17:14]
  output        io_out_control_0_propagate, // @[Tile.scala:17:14]
  output [4:0]  io_out_control_0_shift, // @[Tile.scala:17:14]
  output [2:0]  io_out_id_0, // @[Tile.scala:17:14]
  output        io_out_last_0, // @[Tile.scala:17:14]
  input         io_in_valid_0, // @[Tile.scala:17:14]
  output        io_out_valid_0 // @[Tile.scala:17:14]
);

  wire [7:0]  io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
  wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
  wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
  wire        io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
  wire        io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
  wire [4:0]  io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
  wire [2:0]  io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
  wire        io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
  wire        io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
  wire        io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44]
  wire [7:0]  io_out_a_0_0; // @[Tile.scala:16:7]
  wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
  wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
  wire        io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
  wire        io_out_control_0_propagate_0; // @[Tile.scala:16:7]
  wire [4:0]  io_out_control_0_shift_0; // @[Tile.scala:16:7]
  wire [2:0]  io_out_id_0_0; // @[Tile.scala:16:7]
  wire        io_out_last_0_0; // @[Tile.scala:16:7]
  wire        io_out_valid_0_0; // @[Tile.scala:16:7]

  PE_442 tile_0_0 ( // @[Tile.scala:42:44]
    .clock                    (clock),
    .reset                    (reset),
    .io_in_a                  (io_in_a_0_0), // @[Tile.scala:16:7]
    .io_in_b                  (io_in_b_0_0), // @[Tile.scala:16:7]
    .io_in_d                  (io_in_d_0_0), // @[Tile.scala:16:7]
    .io_out_a                 (io_out_a_0_0),
    .io_out_b                 (io_out_b_0_0),
    .io_out_c                 (io_out_c_0_0),
    .io_in_control_dataflow   (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
    .io_in_control_propagate  (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
    .io_in_control_shift      (io_in_control_0_shift_0), // @[Tile.scala:16:7]
    .io_out_control_dataflow  (io_out_control_0_dataflow_0),
    .io_out_control_propagate (io_out_control_0_propagate_0),
    .io_out_control_shift     (io_out_control_0_shift_0),
    .io_in_id                 (io_in_id_0_0), // @[Tile.scala:16:7]
    .io_out_id                (io_out_id_0_0),
    .io_in_last               (io_in_last_0_0), // @[Tile.scala:16:7]
    .io_out_last              (io_out_last_0_0),
    .io_in_valid              (io_in_valid_0_0), // @[Tile.scala:16:7]
    .io_out_valid             (io_out_valid_0_0)
  ); // @[Tile.scala:42:44]

  assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
  assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
  assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
  assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
  assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
  assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
  assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
  assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
  assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
endmodule
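The Tile_186 interface above (8-bit a, 20-bit b/c/d, a 5-bit shift, a 3-bit id, and a single PE instance) is consistent with a 1x1 Tile elaborated with an 8-bit input type, a 20-bit output type, and a 32-bit accumulator type. The sketch below shows such an elaboration; it is an illustrative reconstruction, not the original generator configuration -- the concrete widths, the Dataflow.BOTH setting, max_simultaneous_matmuls = 8, and the use of the CIRCT ChiselStage driver are all assumptions inferred from the port list.

// Hypothetical elaboration sketch (assumed parameters, not the original config).
// The assumed parameters map onto the port widths as follows:
//   inputType  = SInt(8.W)        -> input  [7:0]  io_in_a_0
//   outputType = SInt(20.W)       -> input  [19:0] io_in_b_0 / io_in_d_0, output [19:0] io_out_c_0
//   accType    = SInt(32.W)       -> input  [4:0]  io_in_control_0_shift (log2Up(32) bits in PEControl)
//   max_simultaneous_matmuls = 8  -> input  [2:0]  io_in_id_0 (log2Up(8) bits)
//   rows = 1, columns = 1         -> a single PE instance (tile_0_0)
import chisel3._
import circt.stage.ChiselStage // assumed: the CIRCT-based elaboration driver of recent Chisel versions
import gemmini._

object EmitTile1x1 extends App {
  // The implicit Arithmetic[SInt] instance is resolved from gemmini's Arithmetic companion object.
  println(ChiselStage.emitSystemVerilog(new Tile(
    inputType  = SInt(8.W),
    outputType = SInt(20.W),
    accType    = SInt(32.W),
    df = Dataflow.BOTH,            // assumed dataflow mode (gemmini also defines OS and WS)
    tree_reduction = false,
    max_simultaneous_matmuls = 8,
    rows = 1,
    columns = 1)))
}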
Generate the Verilog code corresponding to the following Chisel files.

File DescribedSRAM.scala:

// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.

package freechips.rocketchip.util

import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil

object DescribedSRAM {
  def apply[T <: Data](
    name: String,
    desc: String,
    size: BigInt, // depth
    data: T
  ): SyncReadMem[T] = {
    val mem = SyncReadMem(size, data)
    mem.suggestName(name)

    val granWidth = data match {
      case v: Vec[_] => v.head.getWidth
      case d => d.getWidth
    }

    val uid = 0

    Annotated.srams(
      component = mem,
      name = name,
      address_width = log2Ceil(size),
      data_width = data.getWidth,
      depth = size,
      description = desc,
      write_mask_granularity = granWidth
    )

    mem
  }
}
module rerocc_tile_dcache_tag_array_0( // @[DescribedSRAM.scala:17:26]
  input  [1:0]   RW0_addr,
  input          RW0_en,
  input          RW0_clk,
  input          RW0_wmode,
  input  [103:0] RW0_wdata,
  output [103:0] RW0_rdata,
  input  [3:0]   RW0_wmask
);

  rerocc_tile_dcache_tag_array_ext rerocc_tile_dcache_tag_array_ext ( // @[DescribedSRAM.scala:17:26]
    .RW0_addr  (RW0_addr),
    .RW0_en    (RW0_en),
    .RW0_clk   (RW0_clk),
    .RW0_wmode (RW0_wmode),
    .RW0_wdata (RW0_wdata),
    .RW0_rdata (RW0_rdata),
    .RW0_wmask (RW0_wmask)
  ); // @[DescribedSRAM.scala:17:26]
endmodule
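The memory above has a depth of 4 (2-bit address), a 104-bit data port, and a 4-bit write mask, i.e. four 26-bit mask granules. A usage sketch along those lines follows; the module name, field widths, and port structure are assumptions chosen only to reproduce those widths, not the actual dcache tag-array configuration.

// Hypothetical usage sketch (assumed widths, not the real dcache tag layout):
// a depth-4 SyncReadMem of Vec(4, UInt(26.W)) yields a 104-bit data port with a
// 4-bit per-element write mask, matching the module above.
import chisel3._
import freechips.rocketchip.util.DescribedSRAM

class TagArrayExample extends Module {
  val io = IO(new Bundle {
    val addr  = Input(UInt(2.W))           // depth 4 -> 2 address bits
    val wen   = Input(Bool())
    val wdata = Input(Vec(4, UInt(26.W)))
    val wmask = Input(Vec(4, Bool()))      // one mask bit per 26-bit granule
    val rdata = Output(Vec(4, UInt(26.W)))
  })

  val tag_array = DescribedSRAM(
    name = "tag_array_example",
    desc = "Example tag array",            // recorded by Annotated.srams
    size = 4,                              // depth
    data = Vec(4, UInt(26.W)))

  // Read only when not writing, so the accesses can share a single read-write port.
  io.rdata := tag_array.read(io.addr, !io.wen)
  when (io.wen) { tag_array.write(io.addr, io.wdata, io.wmask) }
}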
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
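  // (inFlight below keeps that count by instantiating first/last-beat trackers on all
  //  five channels and PopCounting the per-cycle request/response increments and
  //  decrements, which is why the cost warning above applies.)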
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
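As an aside (this is an illustrative sketch, not one of the input files above, and it is not reflected in the generated Verilog that follows): the snippet below shows how MaskGen, which TLEdge.mask wraps with the manager port's beatBytes, turns an in-beat byte offset and a log2 transfer size into a per-byte-lane mask. The module name and port widths are assumptions chosen for an 8-byte beat.

// Illustrative only: a standalone module exercising MaskGen for beatBytes = 8.
import chisel3._
import freechips.rocketchip.util.MaskGen

class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr_lo = Input(UInt(3.W))   // byte offset within an assumed 8-byte beat
    val lgSize  = Input(UInt(2.W))   // log2 of the transfer size in bytes (0..3)
    val mask    = Output(UInt(8.W))  // one bit per byte lane, lane 0 in the LSB
  })
  // e.g. (addr_lo=0, lgSize=3) -> 8'hFF, (addr_lo=4, lgSize=2) -> 8'hF0,
  //      (addr_lo=2, lgSize=0) -> 8'h04
  io.mask := MaskGen(io.addr_lo, io.lgSize, 8)
}

TLEdge.mask(address, lgSize) is exactly MaskGen(address, lgSize, manager.beatBytes), so the same lane pattern is what the A/B-channel helpers above drive onto a.mask and b.mask.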
module TLMonitor_49( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_source = 1'h0; // 
@[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire c_set = 1'h0; // @[Monitor.scala:738:34] wire c_set_wo_ready = 1'h0; // @[Monitor.scala:739:34] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 
1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire 
_same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59] wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14] wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27] wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25] wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28] wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] 
_c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] c_opcodes_set = 4'h0; // @[Monitor.scala:740:34] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_T = 4'h0; // @[Monitor.scala:767:79] wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_T = 4'h0; // @[Monitor.scala:768:77] wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // 
@[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire 
[2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57] wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57] wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [19:0] _c_sizes_set_T_1 = 20'h0; // @[Monitor.scala:768:52] wire [18:0] _c_opcodes_set_T_1 = 19'h0; // @[Monitor.scala:767:54] wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59] wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40] wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [1:0] _c_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _c_set_T = 2'h1; // @[OneHot.scala:58:35] wire [7:0] c_sizes_set = 8'h0; // @[Monitor.scala:741:34] wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // 
@[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117] wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48] wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119] wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire _source_ok_T = ~io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71] wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | 
_mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = 
{mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire _source_ok_T_1 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_1; // @[Parameters.scala:1138:31] wire _T_1081 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1081; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1081; // @[Decoupled.scala:51:35] wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] a_first_counter; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _a_first_counter_T = a_first ? 
a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [3:0] size; // @[Monitor.scala:389:22] reg source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _T_1154 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1154; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1154; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1154; // @[Decoupled.scala:51:35] wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [8:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T = d_first ? 
d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [1:0] inflight; // @[Monitor.scala:614:27] reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [7:0] inflight_sizes; // @[Monitor.scala:618:33] wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] a_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_1 = d_first_1 ? 
d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire a_set; // @[Monitor.scala:626:34] wire a_set_wo_ready; // @[Monitor.scala:627:34] wire [3:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [7:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [3:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [3:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [3:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [3:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [3:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [3:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [15:0] _a_opcode_lookup_T_6 = {12'h0, _a_opcode_lookup_T_1}; // @[Monitor.scala:637:{44,97}] wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [7:0] a_size_lookup; // @[Monitor.scala:639:33] wire [3:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65] wire [3:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65] wire [3:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99] wire [3:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67] wire [3:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99] wire [7:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [15:0] _a_size_lookup_T_6 = {8'h0, _a_size_lookup_T_1}; // @[Monitor.scala:641:{40,91}] wire [15:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[15:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [1:0] _GEN_3 = {1'h0, io_in_a_bits_source_0}; // @[OneHot.scala:58:35] wire [1:0] _GEN_4 = 2'h1 << _GEN_3; // @[OneHot.scala:58:35] wire [1:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_4; // @[OneHot.scala:58:35] wire [1:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_4; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T & _a_set_wo_ready_T[0]; // @[OneHot.scala:58:35] wire _T_1007 = _T_1081 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1007 & _a_set_T[0]; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1007 ? 
_a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1007 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [3:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [18:0] _a_opcodes_set_T_1 = {15'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1007 ? _a_opcodes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [3:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77] wire [19:0] _a_sizes_set_T_1 = {15'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :660:{52,77}] assign a_sizes_set = _T_1007 ? _a_sizes_set_T_1[7:0] : 8'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire d_clr; // @[Monitor.scala:664:34] wire d_clr_wo_ready; // @[Monitor.scala:665:34] wire [3:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [7:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_5 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_5; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_5; // @[Monitor.scala:673:46, :783:46] wire _T_1053 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [1:0] _GEN_6 = {1'h0, io_in_d_bits_source_0}; // @[OneHot.scala:58:35] wire [1:0] _GEN_7 = 2'h1 << _GEN_6; // @[OneHot.scala:58:35] wire [1:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_7; // @[OneHot.scala:58:35] wire [1:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_7; // @[OneHot.scala:58:35] wire [1:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_7; // @[OneHot.scala:58:35] wire [1:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_7; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1053 & ~d_release_ack & _d_clr_wo_ready_T[0]; // @[OneHot.scala:58:35] wire _T_1022 = _T_1154 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1022 & _d_clr_T[0]; // @[OneHot.scala:58:35] wire [30:0] _d_opcodes_clr_T_5 = 31'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1022 ? _d_opcodes_clr_T_5[3:0] : 4'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [30:0] _d_sizes_clr_T_5 = 31'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1022 ? 
_d_sizes_clr_T_5[7:0] : 8'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [1:0] _inflight_T = {inflight[1], inflight[0] | a_set}; // @[Monitor.scala:614:27, :626:34, :705:27] wire _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [1:0] _inflight_T_2 = {1'h0, _inflight_T[0] & _inflight_T_1}; // @[Monitor.scala:705:{27,36,38}] wire [3:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [3:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [3:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [7:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [7:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [7:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [1:0] inflight_1; // @[Monitor.scala:726:35] wire [1:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [3:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [3:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [7:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [7:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_2; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [7:0] c_size_lookup; // @[Monitor.scala:748:35] wire [3:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [15:0] _c_opcode_lookup_T_6 = {12'h0, _c_opcode_lookup_T_1}; // @[Monitor.scala:749:{44,97}] wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [7:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [15:0] _c_size_lookup_T_6 = {8'h0, _c_size_lookup_T_1}; // @[Monitor.scala:750:{42,93}] wire [15:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[15:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire d_clr_1; // @[Monitor.scala:774:34] wire d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [3:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [7:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1125 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1125 & d_release_ack_1 & _d_clr_wo_ready_T_1[0]; // @[OneHot.scala:58:35] wire _T_1107 = _T_1154 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1107 & _d_clr_T_1[0]; // @[OneHot.scala:58:35] wire [30:0] _d_opcodes_clr_T_11 = 31'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1107 ? _d_opcodes_clr_T_11[3:0] : 4'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [30:0] _d_sizes_clr_T_11 = 31'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1107 ? _d_sizes_clr_T_11[7:0] : 8'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7, :795:113] wire _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [1:0] _inflight_T_5 = {1'h0, _inflight_T_3[0] & _inflight_T_4}; // @[Monitor.scala:814:{35,44,46}] wire [3:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [3:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [7:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [7:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
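Throughout the monitor above, burst lengths are decoded with a recurring pattern: an all-ones constant shifted left by the transfer size, truncated, inverted, and sliced (the `27'hFFF << io_in_*_bits_size`, `_a_first_beats1_decode_T*`, and `_d_first_beats1_decode_T*` terms). That sequence builds a byte mask with `size` low bits set and then discards the within-beat address bits, yielding the beat count minus one without a subtractor. The Chisel sketch below shows the same computation with illustrative names; it is an assumption-based paraphrase of the helpers in rocket-chip's package.scala/Edges.scala, which are not quoted here.

import chisel3._
import chisel3.util._

// Illustrative sketch (hypothetical name): beats1 = ((1 << size) - 1) >> log2(beatBytes),
// i.e. the "27'hFFF << size, invert, slice" pattern seen in the generated monitor above.
object Beats1Sketch {
  def apply(size: UInt, maxLgSize: Int, beatBytes: Int): UInt = {
    val ones = ((BigInt(1) << maxLgSize) - 1).U(maxLgSize.W) // e.g. 0xFFF for maxLgSize = 12
    val mask = (~(ones << size))(maxLgSize - 1, 0)           // exactly the low `size` bits set
    mask >> log2Ceil(beatBytes)                              // drop the within-beat bits
  }
}

With maxLgSize = 12 and an 8-byte beat (the monitor above carries a 64-bit data bus and an 8-bit mask), the result is the 9-bit `*_beats1_decode` value that feeds the 9-bit beat counters.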
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_145( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_159 io_out_source_valid_1 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
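The AsyncResetSynchronizerShiftReg_w1_d3_i0_* submodule instantiated above is not shown in this excerpt. As a minimal sketch only, here is what a 1-bit-wide, 3-deep, init-0 asynchronous-reset synchronizer of that kind typically elaborates to; the module body and register names below are illustrative assumptions, not the actual generated source:
module AsyncResetSynchronizerShiftReg_w1_d3_i0 ( // hypothetical sketch, not generated output
  input  clock,
  input  reset,   // driven as an asynchronous reset (reset.asAsyncReset in AsyncValidSync)
  input  io_d,
  output io_q
);
  // Three chained flops, all cleared asynchronously to the init value 0.
  reg sync_2, sync_1, sync_0;
  always @(posedge clock or posedge reset) begin
    if (reset) begin
      sync_2 <= 1'h0;
      sync_1 <= 1'h0;
      sync_0 <= 1'h0;
    end else begin
      sync_2 <= io_d;    // stage closest to the asynchronous input
      sync_1 <= sync_2;
      sync_0 <= sync_1;  // stage that drives the synchronized output
    end
  end
  assign io_q = sync_0;
endmodule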
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_30( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_41 io_out_source_extend ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
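The read/write pointers that these synchronizers carry across the clock boundary are produced by GrayCounter in the Chisel above: the binary count is registered, and the Gray-coded value is formed combinationally as incremented ^ (incremented >> 1), so consecutive pointer values differ in exactly one bit (the memory index is later recovered from the Gray pointer by the ridx(bits-1,0) ^ (ridx(bits,bits) << (bits-1)) expression in the sources). A standalone sketch of that arithmetic, assuming the default depth of 8 so the pointer is bits+1 = 4 bits wide (this module is illustrative, not part of the generated output):
module GrayCounter_w4 ( // hypothetical sketch of GrayCounter with bits+1 = 4
  input        clock,
  input        reset,      // used as an asynchronous reset, as in withReset(reset.asAsyncReset)
  input        increment,  // e.g. io.enq.fire on the source side, io.deq.fire on the sink side
  input        clear,      // e.g. !sink_ready / !source_ready
  output [3:0] gray
);
  reg  [3:0] binary;                                            // "widx_bin" / "ridx_bin"
  wire [3:0] incremented = clear ? 4'h0 : binary + {3'h0, increment};
  always @(posedge clock or posedge reset) begin
    if (reset) binary <= 4'h0;
    else       binary <= incremented;
  end
  // Binary-to-Gray conversion: a pointer sampled mid-transition in the other
  // clock domain resolves to either the old or the new value, never garbage.
  assign gray = incremented ^ (incremented >> 1);
endmodule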
Generate the Verilog code corresponding to the following Chisel files. File ClockGroup.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.prci import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.resources.FixedClockResource case class ClockGroupingNode(groupName: String)(implicit valName: ValName) extends MixedNexusNode(ClockGroupImp, ClockImp)( dFn = { _ => ClockSourceParameters() }, uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) }) { override def circuitIdentity = outputs.size == 1 } class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule { val node = ClockGroupingNode(groupName) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in(0) val (out, _) = node.out.unzip require (node.in.size == 1) require (in.member.size == out.size) (in.member.data zip out) foreach { case (i, o) => o := i } } } object ClockGroup { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node } case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName) extends NexusNode(ClockGroupImp)( dFn = { _ => ClockGroupSourceParameters() }, uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))}) { override def circuitIdentity = outputs.size == 1 } class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule { val node = ClockGroupAggregateNode(groupName) override lazy val desiredName = s"ClockGroupAggregator_$groupName" lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in.unzip val (out, _) = node.out.unzip val outputs = out.flatMap(_.member.data) require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1") require (in.head.member.size == outputs.size) in.head.member.data.zip(outputs).foreach { case (i, o) => o := i } } } object ClockGroupAggregator { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node } class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule { val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() }) lazy val module = new Impl class Impl extends LazyModuleImp(this) { val (out, _) = node.out.unzip out.map { out: ClockGroupBundle => out.member.data.foreach { o => o.clock := clock; o.reset := reset } } } } object SimpleClockGroupSource { def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node } case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName) extends NexusNode(ClockImp)( dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) }, uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) }, inputRequiresOutput = false) { def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix))) } class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule { val node = new 
FixedClockBroadcastNode(fixedClockOpt) { override def circuitIdentity = outputs.size == 1 } lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in(0) val (out, _) = node.out.unzip override def desiredName = s"FixedClockBroadcast_${out.size}" require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock") out.foreach { _ := in } } } object FixedClockBroadcast { def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node } case class PRCIClockGroupNode()(implicit valName: ValName) extends NexusNode(ClockGroupImp)( dFn = { _ => ClockGroupSourceParameters() }, uFn = { _ => ClockGroupSinkParameters("prci", Nil) }, outputRequiresInput = false) File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. 
*/ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. 
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
module ClockGroupAggregator_allClocks( // @[ClockGroup.scala:53:9] input auto_in_member_allClocks_clockTapNode_clock_tap_clock, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_clockTapNode_clock_tap_reset, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_cbus_0_clock, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_cbus_0_reset, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_mbus_0_clock, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_mbus_0_reset, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_fbus_0_clock, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_fbus_0_reset, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_pbus_0_clock, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_pbus_0_reset, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_sbus_1_clock, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_sbus_1_reset, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_sbus_0_clock, // @[LazyModuleImp.scala:107:25] input auto_in_member_allClocks_sbus_0_reset, // @[LazyModuleImp.scala:107:25] output auto_out_5_member_clockTapNode_clockTapNode_clock_tap_clock, // @[LazyModuleImp.scala:107:25] output auto_out_5_member_clockTapNode_clockTapNode_clock_tap_reset, // @[LazyModuleImp.scala:107:25] output auto_out_4_member_cbus_cbus_0_clock, // @[LazyModuleImp.scala:107:25] output auto_out_4_member_cbus_cbus_0_reset, // @[LazyModuleImp.scala:107:25] output auto_out_3_member_mbus_mbus_0_clock, // @[LazyModuleImp.scala:107:25] output auto_out_3_member_mbus_mbus_0_reset, // @[LazyModuleImp.scala:107:25] output auto_out_2_member_fbus_fbus_0_clock, // @[LazyModuleImp.scala:107:25] output auto_out_2_member_fbus_fbus_0_reset, // @[LazyModuleImp.scala:107:25] output auto_out_1_member_pbus_pbus_0_clock, // @[LazyModuleImp.scala:107:25] output auto_out_1_member_pbus_pbus_0_reset, // @[LazyModuleImp.scala:107:25] output auto_out_0_member_sbus_sbus_1_clock, // @[LazyModuleImp.scala:107:25] output auto_out_0_member_sbus_sbus_1_reset, // @[LazyModuleImp.scala:107:25] output auto_out_0_member_sbus_sbus_0_clock, // @[LazyModuleImp.scala:107:25] output auto_out_0_member_sbus_sbus_0_reset // @[LazyModuleImp.scala:107:25] ); wire auto_in_member_allClocks_clockTapNode_clock_tap_clock_0 = auto_in_member_allClocks_clockTapNode_clock_tap_clock; // @[ClockGroup.scala:53:9] wire auto_in_member_allClocks_clockTapNode_clock_tap_reset_0 = auto_in_member_allClocks_clockTapNode_clock_tap_reset; // @[ClockGroup.scala:53:9] wire auto_in_member_allClocks_cbus_0_clock_0 = auto_in_member_allClocks_cbus_0_clock; // @[ClockGroup.scala:53:9] wire auto_in_member_allClocks_cbus_0_reset_0 = auto_in_member_allClocks_cbus_0_reset; // @[ClockGroup.scala:53:9] wire auto_in_member_allClocks_mbus_0_clock_0 = auto_in_member_allClocks_mbus_0_clock; // @[ClockGroup.scala:53:9] wire auto_in_member_allClocks_mbus_0_reset_0 = auto_in_member_allClocks_mbus_0_reset; // @[ClockGroup.scala:53:9] wire auto_in_member_allClocks_fbus_0_clock_0 = auto_in_member_allClocks_fbus_0_clock; // @[ClockGroup.scala:53:9] wire auto_in_member_allClocks_fbus_0_reset_0 = auto_in_member_allClocks_fbus_0_reset; // @[ClockGroup.scala:53:9] wire auto_in_member_allClocks_pbus_0_clock_0 = auto_in_member_allClocks_pbus_0_clock; // @[ClockGroup.scala:53:9] wire auto_in_member_allClocks_pbus_0_reset_0 = auto_in_member_allClocks_pbus_0_reset; // @[ClockGroup.scala:53:9] wire 
auto_in_member_allClocks_sbus_1_clock_0 = auto_in_member_allClocks_sbus_1_clock; // @[ClockGroup.scala:53:9] wire auto_in_member_allClocks_sbus_1_reset_0 = auto_in_member_allClocks_sbus_1_reset; // @[ClockGroup.scala:53:9] wire auto_in_member_allClocks_sbus_0_clock_0 = auto_in_member_allClocks_sbus_0_clock; // @[ClockGroup.scala:53:9] wire auto_in_member_allClocks_sbus_0_reset_0 = auto_in_member_allClocks_sbus_0_reset; // @[ClockGroup.scala:53:9] wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire nodeIn_member_allClocks_clockTapNode_clock_tap_clock = auto_in_member_allClocks_clockTapNode_clock_tap_clock_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_clockTapNode_clock_tap_reset = auto_in_member_allClocks_clockTapNode_clock_tap_reset_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_cbus_0_clock = auto_in_member_allClocks_cbus_0_clock_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_cbus_0_reset = auto_in_member_allClocks_cbus_0_reset_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_mbus_0_clock = auto_in_member_allClocks_mbus_0_clock_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_mbus_0_reset = auto_in_member_allClocks_mbus_0_reset_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_fbus_0_clock = auto_in_member_allClocks_fbus_0_clock_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_fbus_0_reset = auto_in_member_allClocks_fbus_0_reset_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_pbus_0_clock = auto_in_member_allClocks_pbus_0_clock_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_pbus_0_reset = auto_in_member_allClocks_pbus_0_reset_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_sbus_1_clock = auto_in_member_allClocks_sbus_1_clock_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_sbus_1_reset = auto_in_member_allClocks_sbus_1_reset_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_sbus_0_clock = auto_in_member_allClocks_sbus_0_clock_0; // @[ClockGroup.scala:53:9] wire nodeIn_member_allClocks_sbus_0_reset = auto_in_member_allClocks_sbus_0_reset_0; // @[ClockGroup.scala:53:9] wire x1_nodeOut_4_member_clockTapNode_clockTapNode_clock_tap_clock; // @[MixedNode.scala:542:17] wire x1_nodeOut_4_member_clockTapNode_clockTapNode_clock_tap_reset; // @[MixedNode.scala:542:17] wire x1_nodeOut_3_member_cbus_cbus_0_clock; // @[MixedNode.scala:542:17] wire x1_nodeOut_3_member_cbus_cbus_0_reset; // @[MixedNode.scala:542:17] wire x1_nodeOut_2_member_mbus_mbus_0_clock; // @[MixedNode.scala:542:17] wire x1_nodeOut_2_member_mbus_mbus_0_reset; // @[MixedNode.scala:542:17] wire x1_nodeOut_1_member_fbus_fbus_0_clock; // @[MixedNode.scala:542:17] wire x1_nodeOut_1_member_fbus_fbus_0_reset; // @[MixedNode.scala:542:17] wire x1_nodeOut_member_pbus_pbus_0_clock; // @[MixedNode.scala:542:17] wire x1_nodeOut_member_pbus_pbus_0_reset; // @[MixedNode.scala:542:17] wire nodeOut_member_sbus_sbus_1_clock; // @[MixedNode.scala:542:17] wire nodeOut_member_sbus_sbus_1_reset; // @[MixedNode.scala:542:17] wire nodeOut_member_sbus_sbus_0_clock; // @[MixedNode.scala:542:17] wire nodeOut_member_sbus_sbus_0_reset; // @[MixedNode.scala:542:17] wire auto_out_5_member_clockTapNode_clockTapNode_clock_tap_clock_0; // @[ClockGroup.scala:53:9] wire auto_out_5_member_clockTapNode_clockTapNode_clock_tap_reset_0; // @[ClockGroup.scala:53:9] wire auto_out_4_member_cbus_cbus_0_clock_0; 
// @[ClockGroup.scala:53:9] wire auto_out_4_member_cbus_cbus_0_reset_0; // @[ClockGroup.scala:53:9] wire auto_out_3_member_mbus_mbus_0_clock_0; // @[ClockGroup.scala:53:9] wire auto_out_3_member_mbus_mbus_0_reset_0; // @[ClockGroup.scala:53:9] wire auto_out_2_member_fbus_fbus_0_clock_0; // @[ClockGroup.scala:53:9] wire auto_out_2_member_fbus_fbus_0_reset_0; // @[ClockGroup.scala:53:9] wire auto_out_1_member_pbus_pbus_0_clock_0; // @[ClockGroup.scala:53:9] wire auto_out_1_member_pbus_pbus_0_reset_0; // @[ClockGroup.scala:53:9] wire auto_out_0_member_sbus_sbus_1_clock_0; // @[ClockGroup.scala:53:9] wire auto_out_0_member_sbus_sbus_1_reset_0; // @[ClockGroup.scala:53:9] wire auto_out_0_member_sbus_sbus_0_clock_0; // @[ClockGroup.scala:53:9] wire auto_out_0_member_sbus_sbus_0_reset_0; // @[ClockGroup.scala:53:9] assign x1_nodeOut_4_member_clockTapNode_clockTapNode_clock_tap_clock = nodeIn_member_allClocks_clockTapNode_clock_tap_clock; // @[MixedNode.scala:542:17, :551:17] assign x1_nodeOut_4_member_clockTapNode_clockTapNode_clock_tap_reset = nodeIn_member_allClocks_clockTapNode_clock_tap_reset; // @[MixedNode.scala:542:17, :551:17] assign x1_nodeOut_3_member_cbus_cbus_0_clock = nodeIn_member_allClocks_cbus_0_clock; // @[MixedNode.scala:542:17, :551:17] assign x1_nodeOut_3_member_cbus_cbus_0_reset = nodeIn_member_allClocks_cbus_0_reset; // @[MixedNode.scala:542:17, :551:17] assign x1_nodeOut_2_member_mbus_mbus_0_clock = nodeIn_member_allClocks_mbus_0_clock; // @[MixedNode.scala:542:17, :551:17] assign x1_nodeOut_2_member_mbus_mbus_0_reset = nodeIn_member_allClocks_mbus_0_reset; // @[MixedNode.scala:542:17, :551:17] assign x1_nodeOut_1_member_fbus_fbus_0_clock = nodeIn_member_allClocks_fbus_0_clock; // @[MixedNode.scala:542:17, :551:17] assign x1_nodeOut_1_member_fbus_fbus_0_reset = nodeIn_member_allClocks_fbus_0_reset; // @[MixedNode.scala:542:17, :551:17] assign x1_nodeOut_member_pbus_pbus_0_clock = nodeIn_member_allClocks_pbus_0_clock; // @[MixedNode.scala:542:17, :551:17] assign x1_nodeOut_member_pbus_pbus_0_reset = nodeIn_member_allClocks_pbus_0_reset; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_member_sbus_sbus_1_clock = nodeIn_member_allClocks_sbus_1_clock; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_member_sbus_sbus_1_reset = nodeIn_member_allClocks_sbus_1_reset; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_member_sbus_sbus_0_clock = nodeIn_member_allClocks_sbus_0_clock; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_member_sbus_sbus_0_reset = nodeIn_member_allClocks_sbus_0_reset; // @[MixedNode.scala:542:17, :551:17] assign auto_out_0_member_sbus_sbus_1_clock_0 = nodeOut_member_sbus_sbus_1_clock; // @[ClockGroup.scala:53:9] assign auto_out_0_member_sbus_sbus_1_reset_0 = nodeOut_member_sbus_sbus_1_reset; // @[ClockGroup.scala:53:9] assign auto_out_0_member_sbus_sbus_0_clock_0 = nodeOut_member_sbus_sbus_0_clock; // @[ClockGroup.scala:53:9] assign auto_out_0_member_sbus_sbus_0_reset_0 = nodeOut_member_sbus_sbus_0_reset; // @[ClockGroup.scala:53:9] assign auto_out_1_member_pbus_pbus_0_clock_0 = x1_nodeOut_member_pbus_pbus_0_clock; // @[ClockGroup.scala:53:9] assign auto_out_1_member_pbus_pbus_0_reset_0 = x1_nodeOut_member_pbus_pbus_0_reset; // @[ClockGroup.scala:53:9] assign auto_out_2_member_fbus_fbus_0_clock_0 = x1_nodeOut_1_member_fbus_fbus_0_clock; // @[ClockGroup.scala:53:9] assign auto_out_2_member_fbus_fbus_0_reset_0 = x1_nodeOut_1_member_fbus_fbus_0_reset; // @[ClockGroup.scala:53:9] assign auto_out_3_member_mbus_mbus_0_clock_0 = 
x1_nodeOut_2_member_mbus_mbus_0_clock; // @[ClockGroup.scala:53:9] assign auto_out_3_member_mbus_mbus_0_reset_0 = x1_nodeOut_2_member_mbus_mbus_0_reset; // @[ClockGroup.scala:53:9] assign auto_out_4_member_cbus_cbus_0_clock_0 = x1_nodeOut_3_member_cbus_cbus_0_clock; // @[ClockGroup.scala:53:9] assign auto_out_4_member_cbus_cbus_0_reset_0 = x1_nodeOut_3_member_cbus_cbus_0_reset; // @[ClockGroup.scala:53:9] assign auto_out_5_member_clockTapNode_clockTapNode_clock_tap_clock_0 = x1_nodeOut_4_member_clockTapNode_clockTapNode_clock_tap_clock; // @[ClockGroup.scala:53:9] assign auto_out_5_member_clockTapNode_clockTapNode_clock_tap_reset_0 = x1_nodeOut_4_member_clockTapNode_clockTapNode_clock_tap_reset; // @[ClockGroup.scala:53:9] assign auto_out_5_member_clockTapNode_clockTapNode_clock_tap_clock = auto_out_5_member_clockTapNode_clockTapNode_clock_tap_clock_0; // @[ClockGroup.scala:53:9] assign auto_out_5_member_clockTapNode_clockTapNode_clock_tap_reset = auto_out_5_member_clockTapNode_clockTapNode_clock_tap_reset_0; // @[ClockGroup.scala:53:9] assign auto_out_4_member_cbus_cbus_0_clock = auto_out_4_member_cbus_cbus_0_clock_0; // @[ClockGroup.scala:53:9] assign auto_out_4_member_cbus_cbus_0_reset = auto_out_4_member_cbus_cbus_0_reset_0; // @[ClockGroup.scala:53:9] assign auto_out_3_member_mbus_mbus_0_clock = auto_out_3_member_mbus_mbus_0_clock_0; // @[ClockGroup.scala:53:9] assign auto_out_3_member_mbus_mbus_0_reset = auto_out_3_member_mbus_mbus_0_reset_0; // @[ClockGroup.scala:53:9] assign auto_out_2_member_fbus_fbus_0_clock = auto_out_2_member_fbus_fbus_0_clock_0; // @[ClockGroup.scala:53:9] assign auto_out_2_member_fbus_fbus_0_reset = auto_out_2_member_fbus_fbus_0_reset_0; // @[ClockGroup.scala:53:9] assign auto_out_1_member_pbus_pbus_0_clock = auto_out_1_member_pbus_pbus_0_clock_0; // @[ClockGroup.scala:53:9] assign auto_out_1_member_pbus_pbus_0_reset = auto_out_1_member_pbus_pbus_0_reset_0; // @[ClockGroup.scala:53:9] assign auto_out_0_member_sbus_sbus_1_clock = auto_out_0_member_sbus_sbus_1_clock_0; // @[ClockGroup.scala:53:9] assign auto_out_0_member_sbus_sbus_1_reset = auto_out_0_member_sbus_sbus_1_reset_0; // @[ClockGroup.scala:53:9] assign auto_out_0_member_sbus_sbus_0_clock = auto_out_0_member_sbus_sbus_0_clock_0; // @[ClockGroup.scala:53:9] assign auto_out_0_member_sbus_sbus_0_reset = auto_out_0_member_sbus_sbus_0_reset_0; // @[ClockGroup.scala:53:9] endmodule
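The aggregator above contains no logic at all: every output group member's clock and reset are driven combinationally from the matching member of the single input group, which is why the module body is nothing but assign statements. As a rough hand-written sketch of that behaviour, assuming a simplified flat Vec IO with made-up port names rather than the ports the diplomatic ClockGroupAggregator node actually generates:

import chisel3._

// Illustrative stand-in only: each output clock/reset pair is wired straight
// to the corresponding input pair, mirroring the pass-through assigns above.
class ClockResetPair extends Bundle {
  val clock = Clock()
  val reset = Bool()
}

class ClockGroupPassthrough(nMembers: Int) extends RawModule {
  val group_in  = IO(Input(Vec(nMembers, new ClockResetPair)))
  val group_out = IO(Output(Vec(nMembers, new ClockResetPair)))
  group_out := group_in // pure wiring, no registers or gating
}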
Generate the Verilog code corresponding to the following Chisel files. File NullIntSource.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ /** Useful for stubbing out parts of an interrupt interface where certain devices might be missing */ class NullIntSource(num: Int = 1, ports: Int = 1, sources: Int = 1)(implicit p: Parameters) extends LazyModule { val intnode = IntSourceNode(IntSourcePortSimple(num, ports, sources)) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { intnode.out.foreach { case (o, _) => o.foreach { _ := false.B } } } } object NullIntSource { def apply(num: Int = 1, ports: Int = 1, sources: Int = 1)(implicit p: Parameters): IntOutwardNode = { val null_int_source = LazyModule(new NullIntSource(num, ports, sources)) null_int_source.intnode } } object NullIntSyncSource { def apply(num: Int = 1, ports: Int = 1, sources: Int = 1)(implicit p: Parameters): IntSyncOutwardNode = { IntSyncCrossingSource(alreadyRegistered = true) := NullIntSource(num, ports, sources) } } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. 
*/ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. 
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
module NullIntSource_5(); // @[NullIntSource.scala:16:9] wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire intnodeOut_0 = 1'h0; // @[MixedNode.scala:542:17] endmodule
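NullIntSource's only behaviour is the o.foreach { _ := false.B } in its Impl, and that is all the emitted module contains: the interrupt drive shows up as the internal constant wire intnodeOut_0 = 1'h0. A minimal non-diplomatic sketch of the same stub, with the number of interrupt lines taken as an ordinary constructor parameter instead of coming from an IntSourceNode (the class and port names here are hypothetical):

import chisel3._

// Stub interrupt source: every line it drives is permanently deasserted,
// matching the false.B drive in NullIntSource's Impl above.
class NullIntStub(lines: Int = 1) extends RawModule {
  val int_out = IO(Output(Vec(lines, Bool())))
  int_out.foreach(_ := false.B)
}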
Generate the Verilog code corresponding to the following Chisel files. File Tile.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ import Util._ /** * A Tile is a purely combinational 2D array of passThrough PEs. * a, b, s, and in_propag are broadcast across the entire array and are passed through to the Tile's outputs * @param width The data width of each PE in bits * @param rows Number of PEs on each row * @param columns Number of PEs on each column */ class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module { val io = IO(new Bundle { val in_a = Input(Vec(rows, inputType)) val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it val in_d = Input(Vec(columns, outputType)) val in_control = Input(Vec(columns, new PEControl(accType))) val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val in_last = Input(Vec(columns, Bool())) val out_a = Output(Vec(rows, inputType)) val out_c = Output(Vec(columns, outputType)) val out_b = Output(Vec(columns, outputType)) val out_control = Output(Vec(columns, new PEControl(accType))) val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val out_last = Output(Vec(columns, Bool())) val in_valid = Input(Vec(columns, Bool())) val out_valid = Output(Vec(columns, Bool())) val bad_dataflow = Output(Bool()) }) import ev._ val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls))) val tileT = tile.transpose // TODO: abstract hori/vert broadcast, all these connections look the same // Broadcast 'a' horizontally across the Tile for (r <- 0 until rows) { tile(r).foldLeft(io.in_a(r)) { case (in_a, pe) => pe.io.in_a := in_a pe.io.out_a } } // Broadcast 'b' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_b(c)) { case (in_b, pe) => pe.io.in_b := (if (tree_reduction) in_b.zero else in_b) pe.io.out_b } } // Broadcast 'd' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_d(c)) { case (in_d, pe) => pe.io.in_d := in_d pe.io.out_c } } // Broadcast 'control' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_control(c)) { case (in_ctrl, pe) => pe.io.in_control := in_ctrl pe.io.out_control } } // Broadcast 'garbage' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_valid(c)) { case (v, pe) => pe.io.in_valid := v pe.io.out_valid } } // Broadcast 'id' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_id(c)) { case (id, pe) => pe.io.in_id := id pe.io.out_id } } // Broadcast 'last' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_last(c)) { case (last, pe) => pe.io.in_last := last pe.io.out_last } } // Drive the Tile's bottom IO for (c <- 0 until columns) { io.out_c(c) := tile(rows-1)(c).io.out_c io.out_control(c) := tile(rows-1)(c).io.out_control io.out_id(c) := tile(rows-1)(c).io.out_id io.out_last(c) := tile(rows-1)(c).io.out_last io.out_valid(c) := tile(rows-1)(c).io.out_valid io.out_b(c) := { if (tree_reduction) { val prods = tileT(c).map(_.io.out_b) accumulateTree(prods :+ io.in_b(c)) } else { tile(rows - 1)(c).io.out_b } } } io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_) // Drive the Tile's right IO for (r <- 0 until rows) { io.out_a(r) 
:= tile(r)(columns-1).io.out_a } }
module Tile_130( // @[Tile.scala:16:7] input clock, // @[Tile.scala:16:7] input reset, // @[Tile.scala:16:7] input [7:0] io_in_a_0, // @[Tile.scala:17:14] input [19:0] io_in_b_0, // @[Tile.scala:17:14] input [19:0] io_in_d_0, // @[Tile.scala:17:14] input io_in_control_0_dataflow, // @[Tile.scala:17:14] input io_in_control_0_propagate, // @[Tile.scala:17:14] input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14] input [2:0] io_in_id_0, // @[Tile.scala:17:14] input io_in_last_0, // @[Tile.scala:17:14] output [7:0] io_out_a_0, // @[Tile.scala:17:14] output [19:0] io_out_c_0, // @[Tile.scala:17:14] output [19:0] io_out_b_0, // @[Tile.scala:17:14] output io_out_control_0_dataflow, // @[Tile.scala:17:14] output io_out_control_0_propagate, // @[Tile.scala:17:14] output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14] output [2:0] io_out_id_0, // @[Tile.scala:17:14] output io_out_last_0, // @[Tile.scala:17:14] input io_in_valid_0, // @[Tile.scala:17:14] output io_out_valid_0 // @[Tile.scala:17:14] ); wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7] wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7] wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7] wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7] wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7] wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7] wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7] wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7] wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7] wire io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44] wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7] wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7] wire io_out_control_0_propagate_0; // @[Tile.scala:16:7] wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7] wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7] wire io_out_last_0_0; // @[Tile.scala:16:7] wire io_out_valid_0_0; // @[Tile.scala:16:7] PE_386 tile_0_0 ( // @[Tile.scala:42:44] .clock (clock), .reset (reset), .io_in_a (io_in_a_0_0), // @[Tile.scala:16:7] .io_in_b (io_in_b_0_0), // @[Tile.scala:16:7] .io_in_d (io_in_d_0_0), // @[Tile.scala:16:7] .io_out_a (io_out_a_0_0), .io_out_b (io_out_b_0_0), .io_out_c (io_out_c_0_0), .io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7] .io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7] .io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7] .io_out_control_dataflow (io_out_control_0_dataflow_0), .io_out_control_propagate (io_out_control_0_propagate_0), .io_out_control_shift (io_out_control_0_shift_0), .io_in_id (io_in_id_0_0), // @[Tile.scala:16:7] .io_out_id (io_out_id_0_0), .io_in_last (io_in_last_0_0), // @[Tile.scala:16:7] .io_out_last (io_out_last_0_0), .io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7] .io_out_valid (io_out_valid_0_0) ); // @[Tile.scala:42:44] assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7] assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7] assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7] assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7] assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7] assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7] assign io_out_id_0 = io_out_id_0_0; // 
@[Tile.scala:16:7] assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7] assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7] endmodule
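Tile_130 is the degenerate rows = columns = 1 configuration, so every foldLeft broadcast in Tile.scala collapses to a single step: the lone PE's inputs come directly from the tile IO and its outputs drive the tile IO, which is all the instantiation above does. The chaining idiom only becomes visible with more than one PE per row or column; the following standalone sketch shows the same foldLeft wiring shape with a plain register standing in for a PE stage (purely illustrative, not part of gemmini):

import chisel3._

// Each fold step drives the current stage from the previous value and returns
// that stage as the value seen by the next step, the same shape used for the
// a/b/d/control broadcasts in Tile.scala.
class ChainedStages(n: Int, w: Int) extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(w.W))
    val out = Output(UInt(w.W))
  })
  val stages = Seq.fill(n)(Reg(UInt(w.W)))
  io.out := stages.foldLeft(io.in) { case (prev, stage) =>
    stage := prev
    stage
  }
}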
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w4_d3_i0_17( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input [3:0] io_d, // @[ShiftReg.scala:36:14] output [3:0] io_q // @[ShiftReg.scala:36:14] ); wire [3:0] io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_2 = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_4 = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_6 = reset; // @[SynchronizerReg.scala:86:21] wire [3:0] _io_q_T; // @[SynchronizerReg.scala:90:14] wire [3:0] io_q_0; // @[SynchronizerReg.scala:80:7] wire _output_T_1 = io_d_0[0]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire _output_T_3 = io_d_0[1]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_1; // @[ShiftReg.scala:48:24] wire _output_T_5 = io_d_0[2]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_2; // @[ShiftReg.scala:48:24] wire _output_T_7 = io_d_0[3]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_3; // @[ShiftReg.scala:48:24] wire [1:0] io_q_lo = {output_1, output_0}; // @[SynchronizerReg.scala:90:14] wire [1:0] io_q_hi = {output_3, output_2}; // @[SynchronizerReg.scala:90:14] assign _io_q_T = {io_q_hi, io_q_lo}; // @[SynchronizerReg.scala:90:14] assign io_q_0 = _io_q_T; // @[SynchronizerReg.scala:80:7, :90:14] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_173 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_174 output_chain_1 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T_2), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_3), // @[SynchronizerReg.scala:87:41] .io_q (output_1) ); // @[ShiftReg.scala:45:23] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_175 output_chain_2 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T_4), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_5), // @[SynchronizerReg.scala:87:41] .io_q (output_2) ); // @[ShiftReg.scala:45:23] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_176 output_chain_3 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T_6), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_7), // @[SynchronizerReg.scala:87:41] .io_q (output_3) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
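For orientation, the register-chain pattern used by SynchronizerPrimitiveShiftReg above (data enters chain.last, each earlier stage copies the next, io.q is driven by chain.head) can be written out by hand as a small standalone module. This is only an illustrative sketch: the module and signal names below are made up, and it hard-codes a depth of 3 with an asynchronous reset instead of parameterizing the reset type the way the real class does.

import chisel3._

// Illustrative sketch only (not part of the sources or generated output above):
// a 3-deep, 1-bit synchronizer with an asynchronous reset, chained the same
// way SynchronizerPrimitiveShiftReg chains its registers.
class ToyAsyncResetSync extends Module with RequireAsyncReset {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Three registers reset to 0; sync_2 samples the asynchronous input.
  val chain = List.tabulate(3) { i => RegInit(false.B).suggestName(s"sync_$i") }
  chain.last := io.d
  (chain.init zip chain.tail).foreach { case (sink, source) => sink := source }
  io.q := chain.head
}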
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_194( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_211 io_out_sink_extend ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
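The widx/ridx pointers that AsyncQueueSource and AsyncQueueSink exchange come from the GrayCounter object above: a binary register whose output is taken as bin ^ (bin >> 1), so only one bit toggles per increment and the pointer can be synchronized bit by bit. As a hedged illustration of that construction, here is a minimal free-running variant; the module name and ports are invented for this sketch and the clear input of the original is omitted.

import chisel3._

// Illustrative sketch only: a Gray-coded counter built the same way as the
// GrayCounter object above. Names and widths are made up for this example.
class ToyGrayCounter(bits: Int) extends Module {
  val io = IO(new Bundle {
    val inc  = Input(Bool())
    val gray = Output(UInt(bits.W))
  })
  val incremented = Wire(UInt(bits.W))
  val binary = RegNext(incremented, 0.U)      // binary count, registered
  incremented := binary + io.inc.asUInt       // next binary value
  io.gray := incremented ^ (incremented >> 1) // binary-to-Gray conversion
}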
Generate the Verilog code corresponding to the following Chisel files. File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def 
validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File package.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File SourceD.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import TLMessages._ import TLAtomics._ import TLPermissions._ class SourceDRequest(params: InclusiveCacheParameters) extends FullRequest(params) { val sink = UInt(params.inner.bundle.sinkBits.W) val way = UInt(params.wayBits.W) val bad = Bool() } class SourceDHazard(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val way = UInt(params.wayBits.W) } class PutBufferACEntry(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val data = UInt(params.inner.bundle.dataBits.W) val mask = UInt((params.inner.bundle.dataBits/8).W) val corrupt = Bool() } class SourceD(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val req = Flipped(Decoupled(new SourceDRequest(params))) val d = Decoupled(new TLBundleD(params.inner.bundle)) // Put data from SinkA val pb_pop = Decoupled(new PutBufferPop(params)) val pb_beat = Flipped(new PutBufferAEntry(params)) // Release data from SinkC val rel_pop = Decoupled(new PutBufferPop(params)) val rel_beat = Flipped(new PutBufferCEntry(params)) // Access to the BankedStore val bs_radr = Decoupled(new BankedStoreInnerAddress(params)) val bs_rdat = Flipped(new BankedStoreInnerDecoded(params)) val bs_wadr = Decoupled(new BankedStoreInnerAddress(params)) val bs_wdat = new BankedStoreInnerPoison(params) // Is it safe to evict/replace this way? val evict_req = Flipped(new SourceDHazard(params)) val evict_safe = Bool() val grant_req = Flipped(new SourceDHazard(params)) val grant_safe = Bool() }) val beatBytes = params.inner.manager.beatBytes val writeBytes = params.micro.writeBytes val s1_valid = Wire(Bool()) val s2_valid = Wire(Bool()) val s3_valid = Wire(Bool()) val s2_ready = Wire(Bool()) val s3_ready = Wire(Bool()) val s4_ready = Wire(Bool()) ////////////////////////////////////// STAGE 1 ////////////////////////////////////// // Reform the request beats val busy = RegInit(false.B) val s1_block_r = RegInit(false.B) val s1_counter = RegInit(0.U(params.innerBeatBits.W)) val s1_req_reg = RegEnable(io.req.bits, !busy && io.req.valid) val s1_req = Mux(!busy, io.req.bits, s1_req_reg) val s1_x_bypass = Wire(UInt((beatBytes/writeBytes).W)) // might go from high=>low during stall val s1_latch_bypass = RegNext(!(busy || io.req.valid) || s2_ready) val s1_bypass = Mux(s1_latch_bypass, s1_x_bypass, RegEnable(s1_x_bypass, s1_latch_bypass)) val s1_mask = MaskGen(s1_req.offset, s1_req.size, beatBytes, writeBytes) & ~s1_bypass val s1_grant = (s1_req.opcode === AcquireBlock && s1_req.param === BtoT) || s1_req.opcode === AcquirePerm val s1_need_r = s1_mask.orR && s1_req.prio(0) && s1_req.opcode =/= Hint && !s1_grant && (s1_req.opcode =/= PutFullData || s1_req.size < log2Ceil(writeBytes).U ) val s1_valid_r = (busy || io.req.valid) && s1_need_r && !s1_block_r val s1_need_pb = Mux(s1_req.prio(0), !s1_req.opcode(2), s1_req.opcode(0)) // hasData val s1_single = Mux(s1_req.prio(0), s1_req.opcode === Hint || s1_grant, s1_req.opcode === Release) val s1_retires = !s1_single // retire all operations with data in s3 for bypass (saves energy) // Alternatively: val s1_retires = s1_need_pb // retire only updates for bypass (less backpressure from WB) val s1_beats1 = Mux(s1_single, 0.U, UIntToOH1(s1_req.size, log2Up(params.cache.blockBytes)) >> 
log2Ceil(beatBytes)) val s1_beat = (s1_req.offset >> log2Ceil(beatBytes)) | s1_counter val s1_last = s1_counter === s1_beats1 val s1_first = s1_counter === 0.U params.ccover(s1_block_r, "SOURCED_1_SRAM_HOLD", "SRAM read-out successful, but stalled by stage 2") params.ccover(!s1_latch_bypass, "SOURCED_1_BYPASS_HOLD", "Bypass match successful, but stalled by stage 2") params.ccover((busy || io.req.valid) && !s1_need_r, "SOURCED_1_NO_MODIFY", "Transaction servicable without SRAM") io.bs_radr.valid := s1_valid_r io.bs_radr.bits.noop := false.B io.bs_radr.bits.way := s1_req.way io.bs_radr.bits.set := s1_req.set io.bs_radr.bits.beat := s1_beat io.bs_radr.bits.mask := s1_mask params.ccover(io.bs_radr.valid && !io.bs_radr.ready, "SOURCED_1_READ_STALL", "Data readout stalled") // Make a queue to catch BS readout during stalls val queue = Module(new Queue(chiselTypeOf(io.bs_rdat), 3, flow=true)) queue.io.enq.valid := RegNext(RegNext(io.bs_radr.fire)) queue.io.enq.bits := io.bs_rdat assert (!queue.io.enq.valid || queue.io.enq.ready) params.ccover(!queue.io.enq.ready, "SOURCED_1_QUEUE_FULL", "Filled SRAM skidpad queue completely") when (io.bs_radr.fire) { s1_block_r := true.B } when (io.req.valid) { busy := true.B } when (s1_valid && s2_ready) { s1_counter := s1_counter + 1.U s1_block_r := false.B when (s1_last) { s1_counter := 0.U busy := false.B } } params.ccover(s1_valid && !s2_ready, "SOURCED_1_STALL", "Stage 1 pipeline blocked") io.req.ready := !busy s1_valid := (busy || io.req.valid) && (!s1_valid_r || io.bs_radr.ready) ////////////////////////////////////// STAGE 2 ////////////////////////////////////// // Fetch the request data val s2_latch = s1_valid && s2_ready val s2_full = RegInit(false.B) val s2_valid_pb = RegInit(false.B) val s2_beat = RegEnable(s1_beat, s2_latch) val s2_bypass = RegEnable(s1_bypass, s2_latch) val s2_req = RegEnable(s1_req, s2_latch) val s2_last = RegEnable(s1_last, s2_latch) val s2_need_r = RegEnable(s1_need_r, s2_latch) val s2_need_pb = RegEnable(s1_need_pb, s2_latch) val s2_retires = RegEnable(s1_retires, s2_latch) val s2_need_d = RegEnable(!s1_need_pb || s1_first, s2_latch) val s2_pdata_raw = Wire(new PutBufferACEntry(params)) val s2_pdata = s2_pdata_raw holdUnless s2_valid_pb s2_pdata_raw.data := Mux(s2_req.prio(0), io.pb_beat.data, io.rel_beat.data) s2_pdata_raw.mask := Mux(s2_req.prio(0), io.pb_beat.mask, ~0.U(params.inner.manager.beatBytes.W)) s2_pdata_raw.corrupt := Mux(s2_req.prio(0), io.pb_beat.corrupt, io.rel_beat.corrupt) io.pb_pop.valid := s2_valid_pb && s2_req.prio(0) io.pb_pop.bits.index := s2_req.put io.pb_pop.bits.last := s2_last io.rel_pop.valid := s2_valid_pb && !s2_req.prio(0) io.rel_pop.bits.index := s2_req.put io.rel_pop.bits.last := s2_last params.ccover(io.pb_pop.valid && !io.pb_pop.ready, "SOURCED_2_PUTA_STALL", "Channel A put buffer was not ready in time") if (!params.firstLevel) params.ccover(io.rel_pop.valid && !io.rel_pop.ready, "SOURCED_2_PUTC_STALL", "Channel C put buffer was not ready in time") val pb_ready = Mux(s2_req.prio(0), io.pb_pop.ready, io.rel_pop.ready) when (pb_ready) { s2_valid_pb := false.B } when (s2_valid && s3_ready) { s2_full := false.B } when (s2_latch) { s2_valid_pb := s1_need_pb } when (s2_latch) { s2_full := true.B } params.ccover(s2_valid && !s3_ready, "SOURCED_2_STALL", "Stage 2 pipeline blocked") s2_valid := s2_full && (!s2_valid_pb || pb_ready) s2_ready := !s2_full || (s3_ready && (!s2_valid_pb || pb_ready)) ////////////////////////////////////// STAGE 3 ////////////////////////////////////// // Send D response 
val s3_latch = s2_valid && s3_ready val s3_full = RegInit(false.B) val s3_valid_d = RegInit(false.B) val s3_beat = RegEnable(s2_beat, s3_latch) val s3_bypass = RegEnable(s2_bypass, s3_latch) val s3_req = RegEnable(s2_req, s3_latch) val s3_adjusted_opcode = Mux(s3_req.bad, Get, s3_req.opcode) // kill update when denied val s3_last = RegEnable(s2_last, s3_latch) val s3_pdata = RegEnable(s2_pdata, s3_latch) val s3_need_pb = RegEnable(s2_need_pb, s3_latch) val s3_retires = RegEnable(s2_retires, s3_latch) val s3_need_r = RegEnable(s2_need_r, s3_latch) val s3_need_bs = s3_need_pb val s3_acq = s3_req.opcode === AcquireBlock || s3_req.opcode === AcquirePerm // Collect s3's data from either the BankedStore or bypass // NOTE: we use the s3_bypass passed down from s1_bypass, because s2-s4 were guarded by the hazard checks and not stale val s3_bypass_data = Wire(UInt()) def chunk(x: UInt): Seq[UInt] = Seq.tabulate(beatBytes/writeBytes) { i => x((i+1)*writeBytes*8-1, i*writeBytes*8) } def chop (x: UInt): Seq[Bool] = Seq.tabulate(beatBytes/writeBytes) { i => x(i) } def bypass(sel: UInt, x: UInt, y: UInt) = (chop(sel) zip (chunk(x) zip chunk(y))) .map { case (s, (x, y)) => Mux(s, x, y) } .asUInt val s3_rdata = bypass(s3_bypass, s3_bypass_data, queue.io.deq.bits.data) // Lookup table for response codes val grant = Mux(s3_req.param === BtoT, Grant, GrantData) val resp_opcode = VecInit(Seq(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, grant, Grant)) // No restrictions on the type of buffer used here val d = Wire(chiselTypeOf(io.d)) io.d <> params.micro.innerBuf.d(d) d.valid := s3_valid_d d.bits.opcode := Mux(s3_req.prio(0), resp_opcode(s3_req.opcode), ReleaseAck) d.bits.param := Mux(s3_req.prio(0) && s3_acq, Mux(s3_req.param =/= NtoB, toT, toB), 0.U) d.bits.size := s3_req.size d.bits.source := s3_req.source d.bits.sink := s3_req.sink d.bits.denied := s3_req.bad d.bits.data := s3_rdata d.bits.corrupt := s3_req.bad && d.bits.opcode(0) queue.io.deq.ready := s3_valid && s4_ready && s3_need_r assert (!s3_full || !s3_need_r || queue.io.deq.valid) when (d.ready) { s3_valid_d := false.B } when (s3_valid && s4_ready) { s3_full := false.B } when (s3_latch) { s3_valid_d := s2_need_d } when (s3_latch) { s3_full := true.B } params.ccover(s3_valid && !s4_ready, "SOURCED_3_STALL", "Stage 3 pipeline blocked") s3_valid := s3_full && (!s3_valid_d || d.ready) s3_ready := !s3_full || (s4_ready && (!s3_valid_d || d.ready)) ////////////////////////////////////// STAGE 4 ////////////////////////////////////// // Writeback updated data val s4_latch = s3_valid && s3_retires && s4_ready val s4_full = RegInit(false.B) val s4_beat = RegEnable(s3_beat, s4_latch) val s4_need_r = RegEnable(s3_need_r, s4_latch) val s4_need_bs = RegEnable(s3_need_bs, s4_latch) val s4_need_pb = RegEnable(s3_need_pb, s4_latch) val s4_req = RegEnable(s3_req, s4_latch) val s4_adjusted_opcode = RegEnable(s3_adjusted_opcode, s4_latch) val s4_pdata = RegEnable(s3_pdata, s4_latch) val s4_rdata = RegEnable(s3_rdata, s4_latch) val atomics = Module(new Atomics(params.inner.bundle)) atomics.io.write := s4_req.prio(2) atomics.io.a.opcode := s4_adjusted_opcode atomics.io.a.param := s4_req.param atomics.io.a.size := 0.U atomics.io.a.source := 0.U atomics.io.a.address := 0.U atomics.io.a.mask := s4_pdata.mask atomics.io.a.data := s4_pdata.data atomics.io.a.corrupt := DontCare atomics.io.data_in := s4_rdata io.bs_wadr.valid := s4_full && s4_need_bs io.bs_wadr.bits.noop := false.B io.bs_wadr.bits.way := s4_req.way io.bs_wadr.bits.set := 
s4_req.set io.bs_wadr.bits.beat := s4_beat io.bs_wadr.bits.mask := Cat(s4_pdata.mask.asBools.grouped(writeBytes).map(_.reduce(_||_)).toList.reverse) io.bs_wdat.data := atomics.io.data_out assert (!(s4_full && s4_need_pb && s4_pdata.corrupt), "Data poisoning unsupported") params.ccover(io.bs_wadr.valid && !io.bs_wadr.ready, "SOURCED_4_WRITEBACK_STALL", "Data writeback stalled") params.ccover(s4_req.prio(0) && s4_req.opcode === ArithmeticData && s4_req.param === MIN, "SOURCED_4_ATOMIC_MIN", "Evaluated a signed minimum atomic") params.ccover(s4_req.prio(0) && s4_req.opcode === ArithmeticData && s4_req.param === MAX, "SOURCED_4_ATOMIC_MAX", "Evaluated a signed maximum atomic") params.ccover(s4_req.prio(0) && s4_req.opcode === ArithmeticData && s4_req.param === MINU, "SOURCED_4_ATOMIC_MINU", "Evaluated an unsigned minimum atomic") params.ccover(s4_req.prio(0) && s4_req.opcode === ArithmeticData && s4_req.param === MAXU, "SOURCED_4_ATOMIC_MAXU", "Evaluated an unsigned minimum atomic") params.ccover(s4_req.prio(0) && s4_req.opcode === ArithmeticData && s4_req.param === ADD, "SOURCED_4_ATOMIC_ADD", "Evaluated an addition atomic") params.ccover(s4_req.prio(0) && s4_req.opcode === LogicalData && s4_req.param === XOR, "SOURCED_4_ATOMIC_XOR", "Evaluated a bitwise XOR atomic") params.ccover(s4_req.prio(0) && s4_req.opcode === LogicalData && s4_req.param === OR, "SOURCED_4_ATOMIC_OR", "Evaluated a bitwise OR atomic") params.ccover(s4_req.prio(0) && s4_req.opcode === LogicalData && s4_req.param === AND, "SOURCED_4_ATOMIC_AND", "Evaluated a bitwise AND atomic") params.ccover(s4_req.prio(0) && s4_req.opcode === LogicalData && s4_req.param === SWAP, "SOURCED_4_ATOMIC_SWAP", "Evaluated a bitwise SWAP atomic") when (io.bs_wadr.ready || !s4_need_bs) { s4_full := false.B } when (s4_latch) { s4_full := true.B } s4_ready := !s3_retires || !s4_full || io.bs_wadr.ready || !s4_need_bs ////////////////////////////////////// RETIRED ////////////////////////////////////// // Record for bypass the last three retired writebacks // We need 3 slots to collect what was in s2, s3, s4 when the request was in s1 // ... 
you can't rely on s4 being full if bubbles got introduced between s1 and s2 val retire = s4_full && (io.bs_wadr.ready || !s4_need_bs) val s5_req = RegEnable(s4_req, retire) val s5_beat = RegEnable(s4_beat, retire) val s5_dat = RegEnable(atomics.io.data_out, retire) val s6_req = RegEnable(s5_req, retire) val s6_beat = RegEnable(s5_beat, retire) val s6_dat = RegEnable(s5_dat, retire) val s7_dat = RegEnable(s6_dat, retire) ////////////////////////////////////// BYPASSS ////////////////////////////////////// // Manually retime this circuit to pull a register stage forward val pre_s3_req = Mux(s3_latch, s2_req, s3_req) val pre_s4_req = Mux(s4_latch, s3_req, s4_req) val pre_s5_req = Mux(retire, s4_req, s5_req) val pre_s6_req = Mux(retire, s5_req, s6_req) val pre_s3_beat = Mux(s3_latch, s2_beat, s3_beat) val pre_s4_beat = Mux(s4_latch, s3_beat, s4_beat) val pre_s5_beat = Mux(retire, s4_beat, s5_beat) val pre_s6_beat = Mux(retire, s5_beat, s6_beat) val pre_s5_dat = Mux(retire, atomics.io.data_out, s5_dat) val pre_s6_dat = Mux(retire, s5_dat, s6_dat) val pre_s7_dat = Mux(retire, s6_dat, s7_dat) val pre_s4_full = s4_latch || (!(io.bs_wadr.ready || !s4_need_bs) && s4_full) val pre_s3_4_match = pre_s4_req.set === pre_s3_req.set && pre_s4_req.way === pre_s3_req.way && pre_s4_beat === pre_s3_beat && pre_s4_full val pre_s3_5_match = pre_s5_req.set === pre_s3_req.set && pre_s5_req.way === pre_s3_req.way && pre_s5_beat === pre_s3_beat val pre_s3_6_match = pre_s6_req.set === pre_s3_req.set && pre_s6_req.way === pre_s3_req.way && pre_s6_beat === pre_s3_beat val pre_s3_4_bypass = Mux(pre_s3_4_match, MaskGen(pre_s4_req.offset, pre_s4_req.size, beatBytes, writeBytes), 0.U) val pre_s3_5_bypass = Mux(pre_s3_5_match, MaskGen(pre_s5_req.offset, pre_s5_req.size, beatBytes, writeBytes), 0.U) val pre_s3_6_bypass = Mux(pre_s3_6_match, MaskGen(pre_s6_req.offset, pre_s6_req.size, beatBytes, writeBytes), 0.U) s3_bypass_data := bypass(RegNext(pre_s3_4_bypass), atomics.io.data_out, RegNext( bypass(pre_s3_5_bypass, pre_s5_dat, bypass(pre_s3_6_bypass, pre_s6_dat, pre_s7_dat)))) // Detect which parts of s1 will be bypassed from later pipeline stages (s1-s4) // Note: we also bypass from reads ahead in the pipeline to save power val s1_2_match = s2_req.set === s1_req.set && s2_req.way === s1_req.way && s2_beat === s1_beat && s2_full && s2_retires val s1_3_match = s3_req.set === s1_req.set && s3_req.way === s1_req.way && s3_beat === s1_beat && s3_full && s3_retires val s1_4_match = s4_req.set === s1_req.set && s4_req.way === s1_req.way && s4_beat === s1_beat && s4_full for (i <- 0 until 8) { val cover = 1.U val s2 = s1_2_match === cover(0) val s3 = s1_3_match === cover(1) val s4 = s1_4_match === cover(2) params.ccover(io.req.valid && s2 && s3 && s4, "SOURCED_BYPASS_CASE_" + i, "Bypass data from all subsets of pipeline stages") } val s1_2_bypass = Mux(s1_2_match, MaskGen(s2_req.offset, s2_req.size, beatBytes, writeBytes), 0.U) val s1_3_bypass = Mux(s1_3_match, MaskGen(s3_req.offset, s3_req.size, beatBytes, writeBytes), 0.U) val s1_4_bypass = Mux(s1_4_match, MaskGen(s4_req.offset, s4_req.size, beatBytes, writeBytes), 0.U) s1_x_bypass := s1_2_bypass | s1_3_bypass | s1_4_bypass ////////////////////////////////////// HAZARDS ////////////////////////////////////// // SinkC, SourceC, and SinkD can never interfer with each other because their operation // is fully contained with an execution plan of an MSHR. That MSHR owns the entire set, so // there is no way for a data race. // However, SourceD is special. 
We allow it to run ahead after the MSHR and scheduler have // released control of a set+way. This is necessary to allow single cycle occupancy for // hits. Thus, we need to be careful about data hazards between SourceD and the other ports // of the BankedStore. We can at least compare to registers 's1_req_reg', because the first // cycle of SourceD falls within the occupancy of the MSHR's plan. // Must ReleaseData=> be interlocked? RaW hazard io.evict_safe := (!busy || io.evict_req.way =/= s1_req_reg.way || io.evict_req.set =/= s1_req_reg.set) && (!s2_full || io.evict_req.way =/= s2_req.way || io.evict_req.set =/= s2_req.set) && (!s3_full || io.evict_req.way =/= s3_req.way || io.evict_req.set =/= s3_req.set) && (!s4_full || io.evict_req.way =/= s4_req.way || io.evict_req.set =/= s4_req.set) // Must =>GrantData be interlocked? WaR hazard io.grant_safe := (!busy || io.grant_req.way =/= s1_req_reg.way || io.grant_req.set =/= s1_req_reg.set) && (!s2_full || io.grant_req.way =/= s2_req.way || io.grant_req.set =/= s2_req.set) && (!s3_full || io.grant_req.way =/= s3_req.way || io.grant_req.set =/= s3_req.set) && (!s4_full || io.grant_req.way =/= s4_req.way || io.grant_req.set =/= s4_req.set) // SourceD cannot overlap with SinkC b/c the only way inner caches could become // dirty such that they want to put data in via SinkC is if we Granted them permissions, // which must flow through the SourecD pipeline. }
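SourceD's stage 3 merges the SRAM readout with bypassed write data one writeBytes-wide lane at a time, using the chunk/chop/bypass helpers above together with the per-lane masks from MaskGen. The following standalone sketch shows the same lane-mux idea; the module name is hypothetical, and the 4 lanes of 64 bits are an assumption matching the 256-bit data path and 4-bit masks of this configuration.

import chisel3._
import chisel3.util._

// Illustrative sketch only: per-lane bypass mux in the style of SourceD's
// bypass() helper. Where sel(i) is set, lane i comes from x (newer, bypassed
// data); otherwise it comes from y (SRAM readout). Names/widths are assumed.
class ToyLaneBypass(lanes: Int = 4, laneBits: Int = 64) extends Module {
  val io = IO(new Bundle {
    val sel = Input(UInt(lanes.W))
    val x   = Input(UInt((lanes * laneBits).W))
    val y   = Input(UInt((lanes * laneBits).W))
    val out = Output(UInt((lanes * laneBits).W))
  })
  // Split a beat into writeBytes-wide lanes, lane 0 in the low-order bits.
  private def chunk(v: UInt): Seq[UInt] =
    Seq.tabulate(lanes) { i => v((i + 1) * laneBits - 1, i * laneBits) }
  val merged = (io.sel.asBools zip (chunk(io.x) zip chunk(io.y))).map {
    case (s, (a, b)) => Mux(s, a, b)
  }
  io.out := Cat(merged.reverse) // reassemble with lane 0 back in the low bits
}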
module SourceD( // @[SourceD.scala:48:7] input clock, // @[SourceD.scala:48:7] input reset, // @[SourceD.scala:48:7] output io_req_ready, // @[SourceD.scala:50:14] input io_req_valid, // @[SourceD.scala:50:14] input io_req_bits_prio_0, // @[SourceD.scala:50:14] input io_req_bits_prio_1, // @[SourceD.scala:50:14] input io_req_bits_prio_2, // @[SourceD.scala:50:14] input io_req_bits_control, // @[SourceD.scala:50:14] input [2:0] io_req_bits_opcode, // @[SourceD.scala:50:14] input [2:0] io_req_bits_param, // @[SourceD.scala:50:14] input [2:0] io_req_bits_size, // @[SourceD.scala:50:14] input [7:0] io_req_bits_source, // @[SourceD.scala:50:14] input [12:0] io_req_bits_tag, // @[SourceD.scala:50:14] input [5:0] io_req_bits_offset, // @[SourceD.scala:50:14] input [5:0] io_req_bits_put, // @[SourceD.scala:50:14] input [9:0] io_req_bits_set, // @[SourceD.scala:50:14] input [4:0] io_req_bits_sink, // @[SourceD.scala:50:14] input [2:0] io_req_bits_way, // @[SourceD.scala:50:14] input io_req_bits_bad, // @[SourceD.scala:50:14] input io_d_ready, // @[SourceD.scala:50:14] output io_d_valid, // @[SourceD.scala:50:14] output [2:0] io_d_bits_opcode, // @[SourceD.scala:50:14] output [1:0] io_d_bits_param, // @[SourceD.scala:50:14] output [2:0] io_d_bits_size, // @[SourceD.scala:50:14] output [7:0] io_d_bits_source, // @[SourceD.scala:50:14] output [4:0] io_d_bits_sink, // @[SourceD.scala:50:14] output io_d_bits_denied, // @[SourceD.scala:50:14] output [255:0] io_d_bits_data, // @[SourceD.scala:50:14] output io_d_bits_corrupt, // @[SourceD.scala:50:14] input io_pb_pop_ready, // @[SourceD.scala:50:14] output io_pb_pop_valid, // @[SourceD.scala:50:14] output [5:0] io_pb_pop_bits_index, // @[SourceD.scala:50:14] output io_pb_pop_bits_last, // @[SourceD.scala:50:14] input [255:0] io_pb_beat_data, // @[SourceD.scala:50:14] input [31:0] io_pb_beat_mask, // @[SourceD.scala:50:14] input io_pb_beat_corrupt, // @[SourceD.scala:50:14] input io_rel_pop_ready, // @[SourceD.scala:50:14] output io_rel_pop_valid, // @[SourceD.scala:50:14] output [5:0] io_rel_pop_bits_index, // @[SourceD.scala:50:14] output io_rel_pop_bits_last, // @[SourceD.scala:50:14] input [255:0] io_rel_beat_data, // @[SourceD.scala:50:14] input io_rel_beat_corrupt, // @[SourceD.scala:50:14] input io_bs_radr_ready, // @[SourceD.scala:50:14] output io_bs_radr_valid, // @[SourceD.scala:50:14] output [2:0] io_bs_radr_bits_way, // @[SourceD.scala:50:14] output [9:0] io_bs_radr_bits_set, // @[SourceD.scala:50:14] output io_bs_radr_bits_beat, // @[SourceD.scala:50:14] output [3:0] io_bs_radr_bits_mask, // @[SourceD.scala:50:14] input [255:0] io_bs_rdat_data, // @[SourceD.scala:50:14] input io_bs_wadr_ready, // @[SourceD.scala:50:14] output io_bs_wadr_valid, // @[SourceD.scala:50:14] output [2:0] io_bs_wadr_bits_way, // @[SourceD.scala:50:14] output [9:0] io_bs_wadr_bits_set, // @[SourceD.scala:50:14] output io_bs_wadr_bits_beat, // @[SourceD.scala:50:14] output [3:0] io_bs_wadr_bits_mask, // @[SourceD.scala:50:14] output [255:0] io_bs_wdat_data, // @[SourceD.scala:50:14] input [9:0] io_evict_req_set, // @[SourceD.scala:50:14] input [2:0] io_evict_req_way, // @[SourceD.scala:50:14] output io_evict_safe, // @[SourceD.scala:50:14] input [9:0] io_grant_req_set, // @[SourceD.scala:50:14] input [2:0] io_grant_req_way, // @[SourceD.scala:50:14] output io_grant_safe // @[SourceD.scala:50:14] ); wire [255:0] _atomics_io_data_out; // @[SourceD.scala:258:23] wire _queue_io_enq_ready; // @[SourceD.scala:120:21] wire _queue_io_deq_valid; // @[SourceD.scala:120:21] wire 
[255:0] _queue_io_deq_bits_data; // @[SourceD.scala:120:21] wire io_req_valid_0 = io_req_valid; // @[SourceD.scala:48:7] wire io_req_bits_prio_0_0 = io_req_bits_prio_0; // @[SourceD.scala:48:7] wire io_req_bits_prio_1_0 = io_req_bits_prio_1; // @[SourceD.scala:48:7] wire io_req_bits_prio_2_0 = io_req_bits_prio_2; // @[SourceD.scala:48:7] wire io_req_bits_control_0 = io_req_bits_control; // @[SourceD.scala:48:7] wire [2:0] io_req_bits_opcode_0 = io_req_bits_opcode; // @[SourceD.scala:48:7] wire [2:0] io_req_bits_param_0 = io_req_bits_param; // @[SourceD.scala:48:7] wire [2:0] io_req_bits_size_0 = io_req_bits_size; // @[SourceD.scala:48:7] wire [7:0] io_req_bits_source_0 = io_req_bits_source; // @[SourceD.scala:48:7] wire [12:0] io_req_bits_tag_0 = io_req_bits_tag; // @[SourceD.scala:48:7] wire [5:0] io_req_bits_offset_0 = io_req_bits_offset; // @[SourceD.scala:48:7] wire [5:0] io_req_bits_put_0 = io_req_bits_put; // @[SourceD.scala:48:7] wire [9:0] io_req_bits_set_0 = io_req_bits_set; // @[SourceD.scala:48:7] wire [4:0] io_req_bits_sink_0 = io_req_bits_sink; // @[SourceD.scala:48:7] wire [2:0] io_req_bits_way_0 = io_req_bits_way; // @[SourceD.scala:48:7] wire io_req_bits_bad_0 = io_req_bits_bad; // @[SourceD.scala:48:7] wire io_d_ready_0 = io_d_ready; // @[SourceD.scala:48:7] wire io_pb_pop_ready_0 = io_pb_pop_ready; // @[SourceD.scala:48:7] wire [255:0] io_pb_beat_data_0 = io_pb_beat_data; // @[SourceD.scala:48:7] wire [31:0] io_pb_beat_mask_0 = io_pb_beat_mask; // @[SourceD.scala:48:7] wire io_pb_beat_corrupt_0 = io_pb_beat_corrupt; // @[SourceD.scala:48:7] wire io_rel_pop_ready_0 = io_rel_pop_ready; // @[SourceD.scala:48:7] wire [255:0] io_rel_beat_data_0 = io_rel_beat_data; // @[SourceD.scala:48:7] wire io_rel_beat_corrupt_0 = io_rel_beat_corrupt; // @[SourceD.scala:48:7] wire io_bs_radr_ready_0 = io_bs_radr_ready; // @[SourceD.scala:48:7] wire [255:0] io_bs_rdat_data_0 = io_bs_rdat_data; // @[SourceD.scala:48:7] wire io_bs_wadr_ready_0 = io_bs_wadr_ready; // @[SourceD.scala:48:7] wire [9:0] io_evict_req_set_0 = io_evict_req_set; // @[SourceD.scala:48:7] wire [2:0] io_evict_req_way_0 = io_evict_req_way; // @[SourceD.scala:48:7] wire [9:0] io_grant_req_set_0 = io_grant_req_set; // @[SourceD.scala:48:7] wire [2:0] io_grant_req_way_0 = io_grant_req_way; // @[SourceD.scala:48:7] wire io_bs_radr_bits_noop = 1'h0; // @[SourceD.scala:48:7] wire io_bs_wadr_bits_noop = 1'h0; // @[SourceD.scala:48:7] wire [2:0] resp_opcode_0 = 3'h0; // @[SourceD.scala:215:28] wire [2:0] resp_opcode_1 = 3'h0; // @[SourceD.scala:215:28] wire [2:0] resp_opcode_7 = 3'h4; // @[SourceD.scala:215:28] wire [2:0] resp_opcode_5 = 3'h2; // @[SourceD.scala:215:28] wire [2:0] resp_opcode_2 = 3'h1; // @[SourceD.scala:215:28] wire [2:0] resp_opcode_3 = 3'h1; // @[SourceD.scala:215:28] wire [2:0] resp_opcode_4 = 3'h1; // @[SourceD.scala:215:28] wire [31:0] _s2_pdata_raw_mask_T = 32'hFFFFFFFF; // @[SourceD.scala:161:64] wire _io_req_ready_T; // @[SourceD.scala:140:19] wire d_ready = io_d_ready_0; // @[SourceD.scala:48:7, :218:15] wire d_valid; // @[SourceD.scala:218:15] wire [2:0] d_bits_opcode; // @[SourceD.scala:218:15] wire [1:0] d_bits_param; // @[SourceD.scala:218:15] wire [2:0] d_bits_size; // @[SourceD.scala:218:15] wire [7:0] d_bits_source; // @[SourceD.scala:218:15] wire [4:0] d_bits_sink; // @[SourceD.scala:218:15] wire d_bits_denied; // @[SourceD.scala:218:15] wire [255:0] d_bits_data; // @[SourceD.scala:218:15] wire d_bits_corrupt; // @[SourceD.scala:218:15] wire _io_pb_pop_valid_T; // @[SourceD.scala:164:34] wire 
_io_rel_pop_valid_T_1; // @[SourceD.scala:167:35] wire s1_valid_r; // @[SourceD.scala:96:56] wire [2:0] s1_req_way; // @[SourceD.scala:88:19] wire [9:0] s1_req_set; // @[SourceD.scala:88:19] wire s1_beat; // @[SourceD.scala:102:56] wire [3:0] s1_mask; // @[SourceD.scala:92:76] wire _io_bs_wadr_valid_T; // @[SourceD.scala:270:31] wire [3:0] _io_bs_wadr_bits_mask_T_60; // @[SourceD.scala:275:30] wire _io_evict_safe_T_22; // @[SourceD.scala:378:90] wire _io_grant_safe_T_22; // @[SourceD.scala:385:90] wire io_req_ready_0; // @[SourceD.scala:48:7] wire [2:0] io_d_bits_opcode_0; // @[SourceD.scala:48:7] wire [1:0] io_d_bits_param_0; // @[SourceD.scala:48:7] wire [2:0] io_d_bits_size_0; // @[SourceD.scala:48:7] wire [7:0] io_d_bits_source_0; // @[SourceD.scala:48:7] wire [4:0] io_d_bits_sink_0; // @[SourceD.scala:48:7] wire io_d_bits_denied_0; // @[SourceD.scala:48:7] wire [255:0] io_d_bits_data_0; // @[SourceD.scala:48:7] wire io_d_bits_corrupt_0; // @[SourceD.scala:48:7] wire io_d_valid_0; // @[SourceD.scala:48:7] wire [5:0] io_pb_pop_bits_index_0; // @[SourceD.scala:48:7] wire io_pb_pop_bits_last_0; // @[SourceD.scala:48:7] wire io_pb_pop_valid_0; // @[SourceD.scala:48:7] wire [5:0] io_rel_pop_bits_index_0; // @[SourceD.scala:48:7] wire io_rel_pop_bits_last_0; // @[SourceD.scala:48:7] wire io_rel_pop_valid_0; // @[SourceD.scala:48:7] wire [2:0] io_bs_radr_bits_way_0; // @[SourceD.scala:48:7] wire [9:0] io_bs_radr_bits_set_0; // @[SourceD.scala:48:7] wire io_bs_radr_bits_beat_0; // @[SourceD.scala:48:7] wire [3:0] io_bs_radr_bits_mask_0; // @[SourceD.scala:48:7] wire io_bs_radr_valid_0; // @[SourceD.scala:48:7] wire [2:0] io_bs_wadr_bits_way_0; // @[SourceD.scala:48:7] wire [9:0] io_bs_wadr_bits_set_0; // @[SourceD.scala:48:7] wire io_bs_wadr_bits_beat_0; // @[SourceD.scala:48:7] wire [3:0] io_bs_wadr_bits_mask_0; // @[SourceD.scala:48:7] wire io_bs_wadr_valid_0; // @[SourceD.scala:48:7] wire [255:0] io_bs_wdat_data_0; // @[SourceD.scala:48:7] wire io_evict_safe_0; // @[SourceD.scala:48:7] wire io_grant_safe_0; // @[SourceD.scala:48:7] wire _s1_valid_T_3; // @[SourceD.scala:141:38] wire s1_valid; // @[SourceD.scala:74:22] wire _s2_valid_T_2; // @[SourceD.scala:183:23] wire s2_valid; // @[SourceD.scala:75:22] wire _s3_valid_T_2; // @[SourceD.scala:241:23] wire s3_valid; // @[SourceD.scala:76:22] wire _s2_ready_T_4; // @[SourceD.scala:184:24] wire s2_ready; // @[SourceD.scala:77:22] wire _s3_ready_T_4; // @[SourceD.scala:242:24] wire s3_ready; // @[SourceD.scala:78:22] wire _s4_ready_T_5; // @[SourceD.scala:293:59] wire s4_ready; // @[SourceD.scala:79:22] reg busy; // @[SourceD.scala:84:21] reg s1_block_r; // @[SourceD.scala:85:27] reg s1_counter; // @[SourceD.scala:86:27] wire _s1_req_reg_T = ~busy; // @[SourceD.scala:84:21, :87:43] wire _s1_req_reg_T_1 = _s1_req_reg_T & io_req_valid_0; // @[SourceD.scala:48:7, :87:{43,49}] reg s1_req_reg_prio_0; // @[SourceD.scala:87:29] reg s1_req_reg_prio_1; // @[SourceD.scala:87:29] reg s1_req_reg_prio_2; // @[SourceD.scala:87:29] reg s1_req_reg_control; // @[SourceD.scala:87:29] reg [2:0] s1_req_reg_opcode; // @[SourceD.scala:87:29] reg [2:0] s1_req_reg_param; // @[SourceD.scala:87:29] reg [2:0] s1_req_reg_size; // @[SourceD.scala:87:29] reg [7:0] s1_req_reg_source; // @[SourceD.scala:87:29] reg [12:0] s1_req_reg_tag; // @[SourceD.scala:87:29] reg [5:0] s1_req_reg_offset; // @[SourceD.scala:87:29] reg [5:0] s1_req_reg_put; // @[SourceD.scala:87:29] reg [9:0] s1_req_reg_set; // @[SourceD.scala:87:29] reg [4:0] s1_req_reg_sink; // @[SourceD.scala:87:29] reg 
[2:0] s1_req_reg_way; // @[SourceD.scala:87:29] reg s1_req_reg_bad; // @[SourceD.scala:87:29] wire _s1_req_T = ~busy; // @[SourceD.scala:84:21, :87:43, :88:20] wire s1_req_prio_0 = _s1_req_T ? io_req_bits_prio_0_0 : s1_req_reg_prio_0; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] wire s1_req_prio_1 = _s1_req_T ? io_req_bits_prio_1_0 : s1_req_reg_prio_1; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] wire s1_req_prio_2 = _s1_req_T ? io_req_bits_prio_2_0 : s1_req_reg_prio_2; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] wire s1_req_control = _s1_req_T ? io_req_bits_control_0 : s1_req_reg_control; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] wire [2:0] s1_req_opcode = _s1_req_T ? io_req_bits_opcode_0 : s1_req_reg_opcode; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] wire [2:0] s1_req_param = _s1_req_T ? io_req_bits_param_0 : s1_req_reg_param; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] wire [2:0] s1_req_size = _s1_req_T ? io_req_bits_size_0 : s1_req_reg_size; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] wire [7:0] s1_req_source = _s1_req_T ? io_req_bits_source_0 : s1_req_reg_source; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] wire [12:0] s1_req_tag = _s1_req_T ? io_req_bits_tag_0 : s1_req_reg_tag; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] wire [5:0] s1_req_offset = _s1_req_T ? io_req_bits_offset_0 : s1_req_reg_offset; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] wire [5:0] s1_req_put = _s1_req_T ? io_req_bits_put_0 : s1_req_reg_put; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] assign s1_req_set = _s1_req_T ? io_req_bits_set_0 : s1_req_reg_set; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] wire [4:0] s1_req_sink = _s1_req_T ? io_req_bits_sink_0 : s1_req_reg_sink; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] assign s1_req_way = _s1_req_T ? io_req_bits_way_0 : s1_req_reg_way; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] wire s1_req_bad = _s1_req_T ? io_req_bits_bad_0 : s1_req_reg_bad; // @[SourceD.scala:48:7, :87:29, :88:{19,20}] assign io_bs_radr_bits_set_0 = s1_req_set; // @[SourceD.scala:48:7, :88:19] assign io_bs_radr_bits_way_0 = s1_req_way; // @[SourceD.scala:48:7, :88:19] wire [3:0] _s1_x_bypass_T_1; // @[SourceD.scala:360:44] wire [3:0] s1_x_bypass; // @[SourceD.scala:89:25] wire _T_1 = busy | io_req_valid_0; // @[SourceD.scala:48:7, :84:21, :90:40] wire _s1_latch_bypass_T; // @[SourceD.scala:90:40] assign _s1_latch_bypass_T = _T_1; // @[SourceD.scala:90:40] wire _s1_valid_r_T; // @[SourceD.scala:96:26] assign _s1_valid_r_T = _T_1; // @[SourceD.scala:90:40, :96:26] wire _s1_valid_T; // @[SourceD.scala:141:21] assign _s1_valid_T = _T_1; // @[SourceD.scala:90:40, :141:21] wire _s1_latch_bypass_T_1 = ~_s1_latch_bypass_T; // @[SourceD.scala:90:{33,40}] wire _s1_latch_bypass_T_2 = _s1_latch_bypass_T_1 | s2_ready; // @[SourceD.scala:77:22, :90:{33,57}] reg s1_latch_bypass; // @[SourceD.scala:90:32] reg [3:0] s1_bypass_r; // @[SourceD.scala:91:62] wire [3:0] s1_bypass = s1_latch_bypass ? 
s1_x_bypass : s1_bypass_r; // @[SourceD.scala:89:25, :90:32, :91:{22,62}] wire [4:0] _s1_mask_sizeOH_T = {2'h0, s1_req_size}; // @[Misc.scala:202:34] wire [2:0] s1_mask_sizeOH_shiftAmount = _s1_mask_sizeOH_T[2:0]; // @[OneHot.scala:64:49] wire [7:0] _s1_mask_sizeOH_T_1 = 8'h1 << s1_mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [4:0] _s1_mask_sizeOH_T_2 = _s1_mask_sizeOH_T_1[4:0]; // @[OneHot.scala:65:{12,27}] wire [4:0] s1_mask_sizeOH = {_s1_mask_sizeOH_T_2[4], 4'hF}; // @[OneHot.scala:65:27] wire s1_mask_sub_sub_0_1 = s1_req_size > 3'h4; // @[Misc.scala:206:21] wire s1_mask_sub_size = s1_mask_sizeOH[4]; // @[Misc.scala:202:81, :209:26] wire s1_mask_sub_bit = s1_req_offset[4]; // @[Misc.scala:210:26] wire s1_mask_sub_1_2 = s1_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire s1_mask_sub_nbit = ~s1_mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire s1_mask_sub_0_2 = s1_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _s1_mask_sub_acc_T = s1_mask_sub_size & s1_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire s1_mask_sub_0_1 = s1_mask_sub_sub_0_1 | _s1_mask_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _s1_mask_sub_acc_T_1 = s1_mask_sub_size & s1_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire s1_mask_sub_1_1 = s1_mask_sub_sub_0_1 | _s1_mask_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire s1_mask_size = s1_mask_sizeOH[3]; // @[Misc.scala:202:81, :209:26] wire s1_mask_bit = s1_req_offset[3]; // @[Misc.scala:210:26] wire s1_mask_nbit = ~s1_mask_bit; // @[Misc.scala:210:26, :211:20] wire s1_mask_eq = s1_mask_sub_0_2 & s1_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _s1_mask_acc_T = s1_mask_size & s1_mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire s1_mask_acc = s1_mask_sub_0_1 | _s1_mask_acc_T; // @[Misc.scala:215:{29,38}] wire s1_mask_eq_1 = s1_mask_sub_0_2 & s1_mask_bit; // @[Misc.scala:210:26, :214:27] wire _s1_mask_acc_T_1 = s1_mask_size & s1_mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire s1_mask_acc_1 = s1_mask_sub_0_1 | _s1_mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire s1_mask_eq_2 = s1_mask_sub_1_2 & s1_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _s1_mask_acc_T_2 = s1_mask_size & s1_mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire s1_mask_acc_2 = s1_mask_sub_1_1 | _s1_mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire s1_mask_eq_3 = s1_mask_sub_1_2 & s1_mask_bit; // @[Misc.scala:210:26, :214:27] wire _s1_mask_acc_T_3 = s1_mask_size & s1_mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire s1_mask_acc_3 = s1_mask_sub_1_1 | _s1_mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire [1:0] s1_mask_lo = {s1_mask_acc_1, s1_mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] s1_mask_hi = {s1_mask_acc_3, s1_mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] _s1_mask_T = {s1_mask_hi, s1_mask_lo}; // @[Misc.scala:222:10] wire [3:0] _s1_mask_T_1 = ~s1_bypass; // @[SourceD.scala:91:22, :92:78] assign s1_mask = _s1_mask_T & _s1_mask_T_1; // @[Misc.scala:222:10] assign io_bs_radr_bits_mask_0 = s1_mask; // @[SourceD.scala:48:7, :92:76] wire _GEN = s1_req_opcode == 3'h6; // @[SourceD.scala:88:19, :93:33] wire _s1_grant_T; // @[SourceD.scala:93:33] assign _s1_grant_T = _GEN; // @[SourceD.scala:93:33] wire _s1_single_T_2; // @[SourceD.scala:98:89] assign _s1_single_T_2 = _GEN; // @[SourceD.scala:93:33, :98:89] wire _s1_grant_T_1 = s1_req_param == 3'h2; // @[SourceD.scala:88:19, :93:66] wire _s1_grant_T_2 = _s1_grant_T & _s1_grant_T_1; // @[SourceD.scala:93:{33,50,66}] wire _s1_grant_T_3 = 
&s1_req_opcode; // @[SourceD.scala:88:19, :93:93] wire s1_grant = _s1_grant_T_2 | _s1_grant_T_3; // @[SourceD.scala:93:{50,76,93}] wire _s1_need_r_T = |s1_mask; // @[SourceD.scala:92:76, :94:27] wire _s1_need_r_T_1 = _s1_need_r_T & s1_req_prio_0; // @[SourceD.scala:88:19, :94:{27,31}] wire _s1_need_r_T_2 = s1_req_opcode != 3'h5; // @[SourceD.scala:88:19, :94:66] wire _s1_need_r_T_3 = _s1_need_r_T_1 & _s1_need_r_T_2; // @[SourceD.scala:94:{31,49,66}] wire _s1_need_r_T_4 = ~s1_grant; // @[SourceD.scala:93:76, :94:78] wire _s1_need_r_T_5 = _s1_need_r_T_3 & _s1_need_r_T_4; // @[SourceD.scala:94:{49,75,78}] wire _s1_need_r_T_6 = |s1_req_opcode; // @[SourceD.scala:88:19, :95:34] wire _s1_need_r_T_7 = s1_req_size < 3'h3; // @[SourceD.scala:88:19, :95:65] wire _s1_need_r_T_8 = _s1_need_r_T_6 | _s1_need_r_T_7; // @[SourceD.scala:95:{34,50,65}] wire s1_need_r = _s1_need_r_T_5 & _s1_need_r_T_8; // @[SourceD.scala:94:{75,88}, :95:50] wire _s1_valid_r_T_1 = _s1_valid_r_T & s1_need_r; // @[SourceD.scala:94:88, :96:{26,43}] wire _s1_valid_r_T_2 = ~s1_block_r; // @[SourceD.scala:85:27, :96:59] assign s1_valid_r = _s1_valid_r_T_1 & _s1_valid_r_T_2; // @[SourceD.scala:96:{43,56,59}] assign io_bs_radr_valid_0 = s1_valid_r; // @[SourceD.scala:48:7, :96:56] wire _s1_need_pb_T = s1_req_opcode[2]; // @[SourceD.scala:88:19, :97:54] wire _s1_need_pb_T_1 = ~_s1_need_pb_T; // @[SourceD.scala:97:{40,54}] wire _s1_need_pb_T_2 = s1_req_opcode[0]; // @[SourceD.scala:88:19, :97:72] wire s1_need_pb = s1_req_prio_0 ? _s1_need_pb_T_1 : _s1_need_pb_T_2; // @[SourceD.scala:88:19, :97:{23,40,72}] wire _s1_single_T = s1_req_opcode == 3'h5; // @[SourceD.scala:88:19, :98:53] wire _s1_single_T_1 = _s1_single_T | s1_grant; // @[SourceD.scala:93:76, :98:{53,62}] wire s1_single = s1_req_prio_0 ? 
_s1_single_T_1 : _s1_single_T_2; // @[SourceD.scala:88:19, :98:{22,62,89}] wire s1_retires = ~s1_single; // @[SourceD.scala:98:22, :99:20] wire [12:0] _s1_beats1_T = 13'h3F << s1_req_size; // @[package.scala:243:71] wire [5:0] _s1_beats1_T_1 = _s1_beats1_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _s1_beats1_T_2 = ~_s1_beats1_T_1; // @[package.scala:243:{46,76}] wire _s1_beats1_T_3 = _s1_beats1_T_2[5]; // @[package.scala:243:46] wire s1_beats1 = ~s1_single & _s1_beats1_T_3; // @[SourceD.scala:98:22, :101:{22,95}] wire _s1_beat_T = s1_req_offset[5]; // @[SourceD.scala:88:19, :102:32] assign s1_beat = _s1_beat_T | s1_counter; // @[SourceD.scala:86:27, :102:{32,56}] assign io_bs_radr_bits_beat_0 = s1_beat; // @[SourceD.scala:48:7, :102:56] wire s1_last = s1_counter == s1_beats1; // @[SourceD.scala:86:27, :101:22, :103:28] wire s1_first = ~s1_counter; // @[SourceD.scala:86:27, :104:29] wire _queue_io_enq_valid_T = io_bs_radr_ready_0 & io_bs_radr_valid_0; // @[Decoupled.scala:51:35] reg queue_io_enq_valid_REG; // @[SourceD.scala:121:40] reg queue_io_enq_valid_REG_1; // @[SourceD.scala:121:32] wire s2_latch = s1_valid & s2_ready; // @[SourceD.scala:74:22, :77:22, :129:18, :146:27] wire [1:0] _s1_counter_T = {1'h0, s1_counter} + 2'h1; // @[SourceD.scala:86:27, :130:30] wire _s1_counter_T_1 = _s1_counter_T[0]; // @[SourceD.scala:130:30] assign _io_req_ready_T = ~busy; // @[SourceD.scala:84:21, :87:43, :140:19] assign io_req_ready_0 = _io_req_ready_T; // @[SourceD.scala:48:7, :140:19] wire _s1_valid_T_1 = ~s1_valid_r; // @[SourceD.scala:96:56, :141:42] wire _s1_valid_T_2 = _s1_valid_T_1 | io_bs_radr_ready_0; // @[SourceD.scala:48:7, :141:{42,54}] assign _s1_valid_T_3 = _s1_valid_T & _s1_valid_T_2; // @[SourceD.scala:141:{21,38,54}] assign s1_valid = _s1_valid_T_3; // @[SourceD.scala:74:22, :141:38] reg s2_full; // @[SourceD.scala:147:24] reg s2_valid_pb; // @[SourceD.scala:148:28] reg s2_beat; // @[SourceD.scala:149:26] reg [3:0] s2_bypass; // @[SourceD.scala:150:28] reg s2_req_prio_0; // @[SourceD.scala:151:25] reg s2_req_prio_1; // @[SourceD.scala:151:25] reg s2_req_prio_2; // @[SourceD.scala:151:25] reg s2_req_control; // @[SourceD.scala:151:25] reg [2:0] s2_req_opcode; // @[SourceD.scala:151:25] reg [2:0] s2_req_param; // @[SourceD.scala:151:25] reg [2:0] s2_req_size; // @[SourceD.scala:151:25] reg [7:0] s2_req_source; // @[SourceD.scala:151:25] reg [12:0] s2_req_tag; // @[SourceD.scala:151:25] reg [5:0] s2_req_offset; // @[SourceD.scala:151:25] reg [5:0] s2_req_put; // @[SourceD.scala:151:25] assign io_pb_pop_bits_index_0 = s2_req_put; // @[SourceD.scala:48:7, :151:25] assign io_rel_pop_bits_index_0 = s2_req_put; // @[SourceD.scala:48:7, :151:25] reg [9:0] s2_req_set; // @[SourceD.scala:151:25] reg [4:0] s2_req_sink; // @[SourceD.scala:151:25] reg [2:0] s2_req_way; // @[SourceD.scala:151:25] reg s2_req_bad; // @[SourceD.scala:151:25] reg s2_last; // @[SourceD.scala:152:26] assign io_pb_pop_bits_last_0 = s2_last; // @[SourceD.scala:48:7, :152:26] assign io_rel_pop_bits_last_0 = s2_last; // @[SourceD.scala:48:7, :152:26] reg s2_need_r; // @[SourceD.scala:153:28] reg s2_need_pb; // @[SourceD.scala:154:29] reg s2_retires; // @[SourceD.scala:155:29] wire _s2_need_d_T = ~s1_need_pb; // @[SourceD.scala:97:23, :156:29] wire _s2_need_d_T_1 = _s2_need_d_T | s1_first; // @[SourceD.scala:104:29, :156:{29,41}] reg s2_need_d; // @[SourceD.scala:156:28] wire [255:0] _s2_pdata_raw_data_T; // @[SourceD.scala:160:30] wire [31:0] _s2_pdata_raw_mask_T_1; // @[SourceD.scala:161:30] wire 
_s2_pdata_raw_corrupt_T; // @[SourceD.scala:162:30] wire [255:0] s2_pdata_raw_data; // @[SourceD.scala:157:26] wire [31:0] s2_pdata_raw_mask; // @[SourceD.scala:157:26] wire s2_pdata_raw_corrupt; // @[SourceD.scala:157:26] reg [255:0] s2_pdata_r_data; // @[package.scala:88:63] reg [31:0] s2_pdata_r_mask; // @[package.scala:88:63] reg s2_pdata_r_corrupt; // @[package.scala:88:63] wire [255:0] s2_pdata_data = s2_valid_pb ? s2_pdata_raw_data : s2_pdata_r_data; // @[package.scala:88:{42,63}] wire [31:0] s2_pdata_mask = s2_valid_pb ? s2_pdata_raw_mask : s2_pdata_r_mask; // @[package.scala:88:{42,63}] wire s2_pdata_corrupt = s2_valid_pb ? s2_pdata_raw_corrupt : s2_pdata_r_corrupt; // @[package.scala:88:{42,63}] assign _s2_pdata_raw_data_T = s2_req_prio_0 ? io_pb_beat_data_0 : io_rel_beat_data_0; // @[SourceD.scala:48:7, :151:25, :160:30] assign s2_pdata_raw_data = _s2_pdata_raw_data_T; // @[SourceD.scala:157:26, :160:30] assign _s2_pdata_raw_mask_T_1 = s2_req_prio_0 ? io_pb_beat_mask_0 : 32'hFFFFFFFF; // @[SourceD.scala:48:7, :151:25, :161:30] assign s2_pdata_raw_mask = _s2_pdata_raw_mask_T_1; // @[SourceD.scala:157:26, :161:30] assign _s2_pdata_raw_corrupt_T = s2_req_prio_0 ? io_pb_beat_corrupt_0 : io_rel_beat_corrupt_0; // @[SourceD.scala:48:7, :151:25, :162:30] assign s2_pdata_raw_corrupt = _s2_pdata_raw_corrupt_T; // @[SourceD.scala:157:26, :162:30] assign _io_pb_pop_valid_T = s2_valid_pb & s2_req_prio_0; // @[SourceD.scala:148:28, :151:25, :164:34] assign io_pb_pop_valid_0 = _io_pb_pop_valid_T; // @[SourceD.scala:48:7, :164:34] wire _io_rel_pop_valid_T = ~s2_req_prio_0; // @[SourceD.scala:151:25, :167:38] assign _io_rel_pop_valid_T_1 = s2_valid_pb & _io_rel_pop_valid_T; // @[SourceD.scala:148:28, :167:{35,38}] assign io_rel_pop_valid_0 = _io_rel_pop_valid_T_1; // @[SourceD.scala:48:7, :167:35] wire pb_ready = s2_req_prio_0 ? io_pb_pop_ready_0 : io_rel_pop_ready_0; // @[SourceD.scala:48:7, :151:25, :175:21] wire s3_latch = s2_valid & s3_ready; // @[SourceD.scala:75:22, :78:22, :177:18, :189:27] wire _s2_valid_T = ~s2_valid_pb; // @[SourceD.scala:148:28, :183:27] wire _s2_valid_T_1 = _s2_valid_T | pb_ready; // @[SourceD.scala:175:21, :183:{27,40}] assign _s2_valid_T_2 = s2_full & _s2_valid_T_1; // @[SourceD.scala:147:24, :183:{23,40}] assign s2_valid = _s2_valid_T_2; // @[SourceD.scala:75:22, :183:23] wire _s2_ready_T = ~s2_full; // @[SourceD.scala:147:24, :184:15] wire _s2_ready_T_1 = ~s2_valid_pb; // @[SourceD.scala:148:28, :183:27, :184:41] wire _s2_ready_T_2 = _s2_ready_T_1 | pb_ready; // @[SourceD.scala:175:21, :184:{41,54}] wire _s2_ready_T_3 = s3_ready & _s2_ready_T_2; // @[SourceD.scala:78:22, :184:{37,54}] assign _s2_ready_T_4 = _s2_ready_T | _s2_ready_T_3; // @[SourceD.scala:184:{15,24,37}] assign s2_ready = _s2_ready_T_4; // @[SourceD.scala:77:22, :184:24] reg s3_full; // @[SourceD.scala:190:24] reg s3_valid_d; // @[SourceD.scala:191:27] assign d_valid = s3_valid_d; // @[SourceD.scala:191:27, :218:15] reg s3_beat; // @[SourceD.scala:192:26] wire pre_s3_beat = s3_latch ? 
s2_beat : s3_beat; // @[SourceD.scala:149:26, :189:27, :192:26, :319:24] reg [3:0] s3_bypass; // @[SourceD.scala:193:28] reg s3_req_prio_0; // @[SourceD.scala:194:25] reg s3_req_prio_1; // @[SourceD.scala:194:25] reg s3_req_prio_2; // @[SourceD.scala:194:25] reg s3_req_control; // @[SourceD.scala:194:25] reg [2:0] s3_req_opcode; // @[SourceD.scala:194:25] reg [2:0] s3_req_param; // @[SourceD.scala:194:25] reg [2:0] s3_req_size; // @[SourceD.scala:194:25] assign d_bits_size = s3_req_size; // @[SourceD.scala:194:25, :218:15] reg [7:0] s3_req_source; // @[SourceD.scala:194:25] assign d_bits_source = s3_req_source; // @[SourceD.scala:194:25, :218:15] reg [12:0] s3_req_tag; // @[SourceD.scala:194:25] reg [5:0] s3_req_offset; // @[SourceD.scala:194:25] reg [5:0] s3_req_put; // @[SourceD.scala:194:25] reg [9:0] s3_req_set; // @[SourceD.scala:194:25] reg [4:0] s3_req_sink; // @[SourceD.scala:194:25] assign d_bits_sink = s3_req_sink; // @[SourceD.scala:194:25, :218:15] reg [2:0] s3_req_way; // @[SourceD.scala:194:25] reg s3_req_bad; // @[SourceD.scala:194:25] assign d_bits_denied = s3_req_bad; // @[SourceD.scala:194:25, :218:15] wire pre_s3_req_prio_0 = s3_latch ? s2_req_prio_0 : s3_req_prio_0; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire pre_s3_req_prio_1 = s3_latch ? s2_req_prio_1 : s3_req_prio_1; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire pre_s3_req_prio_2 = s3_latch ? s2_req_prio_2 : s3_req_prio_2; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire pre_s3_req_control = s3_latch ? s2_req_control : s3_req_control; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire [2:0] pre_s3_req_opcode = s3_latch ? s2_req_opcode : s3_req_opcode; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire [2:0] pre_s3_req_param = s3_latch ? s2_req_param : s3_req_param; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire [2:0] pre_s3_req_size = s3_latch ? s2_req_size : s3_req_size; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire [7:0] pre_s3_req_source = s3_latch ? s2_req_source : s3_req_source; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire [12:0] pre_s3_req_tag = s3_latch ? s2_req_tag : s3_req_tag; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire [5:0] pre_s3_req_offset = s3_latch ? s2_req_offset : s3_req_offset; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire [5:0] pre_s3_req_put = s3_latch ? s2_req_put : s3_req_put; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire [9:0] pre_s3_req_set = s3_latch ? s2_req_set : s3_req_set; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire [4:0] pre_s3_req_sink = s3_latch ? s2_req_sink : s3_req_sink; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire [2:0] pre_s3_req_way = s3_latch ? s2_req_way : s3_req_way; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire pre_s3_req_bad = s3_latch ? s2_req_bad : s3_req_bad; // @[SourceD.scala:151:25, :189:27, :194:25, :315:24] wire [2:0] s3_adjusted_opcode = s3_req_bad ? 
3'h4 : s3_req_opcode; // @[SourceD.scala:194:25, :195:31] reg s3_last; // @[SourceD.scala:196:26] reg [255:0] s3_pdata_data; // @[SourceD.scala:197:27] reg [31:0] s3_pdata_mask; // @[SourceD.scala:197:27] reg s3_pdata_corrupt; // @[SourceD.scala:197:27] reg s3_need_pb; // @[SourceD.scala:198:29] reg s3_retires; // @[SourceD.scala:199:29] reg s3_need_r; // @[SourceD.scala:200:28] wire _s3_acq_T = s3_req_opcode == 3'h6; // @[SourceD.scala:194:25, :202:30] wire _s3_acq_T_1 = &s3_req_opcode; // @[SourceD.scala:194:25, :202:64] wire s3_acq = _s3_acq_T | _s3_acq_T_1; // @[SourceD.scala:202:{30,47,64}] wire [255:0] _s3_bypass_data_T_50; // @[package.scala:45:27] wire [255:0] s3_bypass_data; // @[SourceD.scala:206:28] wire _s3_rdata_T = s3_bypass[0]; // @[SourceD.scala:193:28, :208:78] wire _s3_rdata_T_1 = s3_bypass[1]; // @[SourceD.scala:193:28, :208:78] wire _s3_rdata_T_2 = s3_bypass[2]; // @[SourceD.scala:193:28, :208:78] wire _s3_rdata_T_3 = s3_bypass[3]; // @[SourceD.scala:193:28, :208:78] wire [63:0] _s3_rdata_T_4 = s3_bypass_data[63:0]; // @[SourceD.scala:206:28, :207:78] wire [63:0] _s3_rdata_T_5 = s3_bypass_data[127:64]; // @[SourceD.scala:206:28, :207:78] wire [63:0] _s3_rdata_T_6 = s3_bypass_data[191:128]; // @[SourceD.scala:206:28, :207:78] wire [63:0] _s3_rdata_T_7 = s3_bypass_data[255:192]; // @[SourceD.scala:206:28, :207:78] wire [63:0] _s3_rdata_T_8 = _queue_io_deq_bits_data[63:0]; // @[SourceD.scala:120:21, :207:78] wire [63:0] _s3_rdata_T_9 = _queue_io_deq_bits_data[127:64]; // @[SourceD.scala:120:21, :207:78] wire [63:0] _s3_rdata_T_10 = _queue_io_deq_bits_data[191:128]; // @[SourceD.scala:120:21, :207:78] wire [63:0] _s3_rdata_T_11 = _queue_io_deq_bits_data[255:192]; // @[SourceD.scala:120:21, :207:78] wire [63:0] _s3_rdata_T_12 = _s3_rdata_T ? _s3_rdata_T_4 : _s3_rdata_T_8; // @[SourceD.scala:207:78, :208:78, :210:75] wire [63:0] _s3_rdata_T_13 = _s3_rdata_T_1 ? _s3_rdata_T_5 : _s3_rdata_T_9; // @[SourceD.scala:207:78, :208:78, :210:75] wire [63:0] _s3_rdata_T_14 = _s3_rdata_T_2 ? _s3_rdata_T_6 : _s3_rdata_T_10; // @[SourceD.scala:207:78, :208:78, :210:75] wire [63:0] _s3_rdata_T_15 = _s3_rdata_T_3 ? 
_s3_rdata_T_7 : _s3_rdata_T_11; // @[SourceD.scala:207:78, :208:78, :210:75] wire [127:0] s3_rdata_lo = {_s3_rdata_T_13, _s3_rdata_T_12}; // @[package.scala:45:27] wire [127:0] s3_rdata_hi = {_s3_rdata_T_15, _s3_rdata_T_14}; // @[package.scala:45:27] wire [255:0] s3_rdata = {s3_rdata_hi, s3_rdata_lo}; // @[package.scala:45:27] assign d_bits_data = s3_rdata; // @[package.scala:45:27] wire _grant_T = s3_req_param == 3'h2; // @[SourceD.scala:194:25, :214:32] wire [2:0] grant = {2'h2, ~_grant_T}; // @[SourceD.scala:214:{18,32}] wire [2:0] resp_opcode_6 = grant; // @[SourceD.scala:214:18, :215:28] assign io_d_valid_0 = d_valid; // @[SourceD.scala:48:7, :218:15] wire [2:0] _d_bits_opcode_T; // @[SourceD.scala:222:24] assign io_d_bits_opcode_0 = d_bits_opcode; // @[SourceD.scala:48:7, :218:15] wire [1:0] _d_bits_param_T_3; // @[SourceD.scala:223:24] assign io_d_bits_param_0 = d_bits_param; // @[SourceD.scala:48:7, :218:15] assign io_d_bits_size_0 = d_bits_size; // @[SourceD.scala:48:7, :218:15] assign io_d_bits_source_0 = d_bits_source; // @[SourceD.scala:48:7, :218:15] assign io_d_bits_sink_0 = d_bits_sink; // @[SourceD.scala:48:7, :218:15] assign io_d_bits_denied_0 = d_bits_denied; // @[SourceD.scala:48:7, :218:15] assign io_d_bits_data_0 = d_bits_data; // @[SourceD.scala:48:7, :218:15] wire _d_bits_corrupt_T_1; // @[SourceD.scala:229:32] assign io_d_bits_corrupt_0 = d_bits_corrupt; // @[SourceD.scala:48:7, :218:15] wire [7:0][2:0] _GEN_0 = {{3'h4}, {resp_opcode_6}, {3'h2}, {3'h1}, {3'h1}, {3'h1}, {3'h0}, {3'h0}}; // @[SourceD.scala:215:28, :222:24] assign _d_bits_opcode_T = s3_req_prio_0 ? _GEN_0[s3_req_opcode] : 3'h6; // @[SourceD.scala:194:25, :222:24] assign d_bits_opcode = _d_bits_opcode_T; // @[SourceD.scala:218:15, :222:24] wire _d_bits_param_T = s3_req_prio_0 & s3_acq; // @[SourceD.scala:194:25, :202:47, :223:40] wire _d_bits_param_T_1 = |s3_req_param; // @[SourceD.scala:194:25, :223:68] wire [1:0] _d_bits_param_T_2 = {1'h0, ~_d_bits_param_T_1}; // @[SourceD.scala:223:{54,68}] assign _d_bits_param_T_3 = _d_bits_param_T ? 
_d_bits_param_T_2 : 2'h0; // @[SourceD.scala:223:{24,40,54}] assign d_bits_param = _d_bits_param_T_3; // @[SourceD.scala:218:15, :223:24] wire _d_bits_corrupt_T = d_bits_opcode[0]; // @[SourceD.scala:218:15, :229:48] assign _d_bits_corrupt_T_1 = s3_req_bad & _d_bits_corrupt_T; // @[SourceD.scala:194:25, :229:{32,48}] assign d_bits_corrupt = _d_bits_corrupt_T_1; // @[SourceD.scala:218:15, :229:32] wire _queue_io_deq_ready_T = s3_valid & s4_ready; // @[SourceD.scala:76:22, :79:22, :231:34] wire _queue_io_deq_ready_T_1 = _queue_io_deq_ready_T & s3_need_r; // @[SourceD.scala:200:28, :231:{34,46}] wire _s3_valid_T = ~s3_valid_d; // @[SourceD.scala:191:27, :241:27] wire _s3_valid_T_1 = _s3_valid_T | d_ready; // @[SourceD.scala:218:15, :241:{27,39}] assign _s3_valid_T_2 = s3_full & _s3_valid_T_1; // @[SourceD.scala:190:24, :241:{23,39}] assign s3_valid = _s3_valid_T_2; // @[SourceD.scala:76:22, :241:23] wire _s3_ready_T = ~s3_full; // @[SourceD.scala:190:24, :232:11, :242:15] wire _s3_ready_T_1 = ~s3_valid_d; // @[SourceD.scala:191:27, :241:27, :242:41] wire _s3_ready_T_2 = _s3_ready_T_1 | d_ready; // @[SourceD.scala:218:15, :242:{41,53}] wire _s3_ready_T_3 = s4_ready & _s3_ready_T_2; // @[SourceD.scala:79:22, :242:{37,53}] assign _s3_ready_T_4 = _s3_ready_T | _s3_ready_T_3; // @[SourceD.scala:242:{15,24,37}] assign s3_ready = _s3_ready_T_4; // @[SourceD.scala:78:22, :242:24] wire _s4_latch_T = s3_valid & s3_retires; // @[SourceD.scala:76:22, :199:29, :247:27] wire s4_latch = _s4_latch_T & s4_ready; // @[SourceD.scala:79:22, :247:{27,41}] reg s4_full; // @[SourceD.scala:248:24] reg s4_beat; // @[SourceD.scala:249:26] assign io_bs_wadr_bits_beat_0 = s4_beat; // @[SourceD.scala:48:7, :249:26] wire pre_s4_beat = s4_latch ? s3_beat : s4_beat; // @[SourceD.scala:192:26, :247:41, :249:26, :320:24] reg s4_need_r; // @[SourceD.scala:250:28] reg s4_need_bs; // @[SourceD.scala:251:29] reg s4_need_pb; // @[SourceD.scala:252:29] reg s4_req_prio_0; // @[SourceD.scala:253:25] reg s4_req_prio_1; // @[SourceD.scala:253:25] reg s4_req_prio_2; // @[SourceD.scala:253:25] reg s4_req_control; // @[SourceD.scala:253:25] reg [2:0] s4_req_opcode; // @[SourceD.scala:253:25] reg [2:0] s4_req_param; // @[SourceD.scala:253:25] reg [2:0] s4_req_size; // @[SourceD.scala:253:25] reg [7:0] s4_req_source; // @[SourceD.scala:253:25] reg [12:0] s4_req_tag; // @[SourceD.scala:253:25] reg [5:0] s4_req_offset; // @[SourceD.scala:253:25] reg [5:0] s4_req_put; // @[SourceD.scala:253:25] reg [9:0] s4_req_set; // @[SourceD.scala:253:25] assign io_bs_wadr_bits_set_0 = s4_req_set; // @[SourceD.scala:48:7, :253:25] reg [4:0] s4_req_sink; // @[SourceD.scala:253:25] reg [2:0] s4_req_way; // @[SourceD.scala:253:25] assign io_bs_wadr_bits_way_0 = s4_req_way; // @[SourceD.scala:48:7, :253:25] reg s4_req_bad; // @[SourceD.scala:253:25] wire pre_s4_req_prio_0 = s4_latch ? s3_req_prio_0 : s4_req_prio_0; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire pre_s4_req_prio_1 = s4_latch ? s3_req_prio_1 : s4_req_prio_1; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire pre_s4_req_prio_2 = s4_latch ? s3_req_prio_2 : s4_req_prio_2; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire pre_s4_req_control = s4_latch ? s3_req_control : s4_req_control; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire [2:0] pre_s4_req_opcode = s4_latch ? s3_req_opcode : s4_req_opcode; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire [2:0] pre_s4_req_param = s4_latch ? 
s3_req_param : s4_req_param; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire [2:0] pre_s4_req_size = s4_latch ? s3_req_size : s4_req_size; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire [7:0] pre_s4_req_source = s4_latch ? s3_req_source : s4_req_source; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire [12:0] pre_s4_req_tag = s4_latch ? s3_req_tag : s4_req_tag; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire [5:0] pre_s4_req_offset = s4_latch ? s3_req_offset : s4_req_offset; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire [5:0] pre_s4_req_put = s4_latch ? s3_req_put : s4_req_put; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire [9:0] pre_s4_req_set = s4_latch ? s3_req_set : s4_req_set; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire [4:0] pre_s4_req_sink = s4_latch ? s3_req_sink : s4_req_sink; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire [2:0] pre_s4_req_way = s4_latch ? s3_req_way : s4_req_way; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] wire pre_s4_req_bad = s4_latch ? s3_req_bad : s4_req_bad; // @[SourceD.scala:194:25, :247:41, :253:25, :316:24] reg [2:0] s4_adjusted_opcode; // @[SourceD.scala:254:37] reg [255:0] s4_pdata_data; // @[SourceD.scala:255:27] reg [31:0] s4_pdata_mask; // @[SourceD.scala:255:27] reg s4_pdata_corrupt; // @[SourceD.scala:255:27] reg [255:0] s4_rdata; // @[SourceD.scala:256:27] assign _io_bs_wadr_valid_T = s4_full & s4_need_bs; // @[SourceD.scala:248:24, :251:29, :270:31] assign io_bs_wadr_valid_0 = _io_bs_wadr_valid_T; // @[SourceD.scala:48:7, :270:31] wire _io_bs_wadr_bits_mask_T = s4_pdata_mask[0]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_1 = s4_pdata_mask[1]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_2 = s4_pdata_mask[2]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_3 = s4_pdata_mask[3]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_4 = s4_pdata_mask[4]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_5 = s4_pdata_mask[5]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_6 = s4_pdata_mask[6]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_7 = s4_pdata_mask[7]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_8 = s4_pdata_mask[8]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_9 = s4_pdata_mask[9]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_10 = s4_pdata_mask[10]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_11 = s4_pdata_mask[11]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_12 = s4_pdata_mask[12]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_13 = s4_pdata_mask[13]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_14 = s4_pdata_mask[14]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_15 = s4_pdata_mask[15]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_16 = s4_pdata_mask[16]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_17 = s4_pdata_mask[17]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_18 = s4_pdata_mask[18]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_19 = s4_pdata_mask[19]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_20 = s4_pdata_mask[20]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_21 = s4_pdata_mask[21]; // @[SourceD.scala:255:27, :275:45] wire 
_io_bs_wadr_bits_mask_T_22 = s4_pdata_mask[22]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_23 = s4_pdata_mask[23]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_24 = s4_pdata_mask[24]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_25 = s4_pdata_mask[25]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_26 = s4_pdata_mask[26]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_27 = s4_pdata_mask[27]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_28 = s4_pdata_mask[28]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_29 = s4_pdata_mask[29]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_30 = s4_pdata_mask[30]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_31 = s4_pdata_mask[31]; // @[SourceD.scala:255:27, :275:45] wire _io_bs_wadr_bits_mask_T_32 = _io_bs_wadr_bits_mask_T | _io_bs_wadr_bits_mask_T_1; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_33 = _io_bs_wadr_bits_mask_T_32 | _io_bs_wadr_bits_mask_T_2; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_34 = _io_bs_wadr_bits_mask_T_33 | _io_bs_wadr_bits_mask_T_3; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_35 = _io_bs_wadr_bits_mask_T_34 | _io_bs_wadr_bits_mask_T_4; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_36 = _io_bs_wadr_bits_mask_T_35 | _io_bs_wadr_bits_mask_T_5; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_37 = _io_bs_wadr_bits_mask_T_36 | _io_bs_wadr_bits_mask_T_6; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_38 = _io_bs_wadr_bits_mask_T_37 | _io_bs_wadr_bits_mask_T_7; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_39 = _io_bs_wadr_bits_mask_T_8 | _io_bs_wadr_bits_mask_T_9; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_40 = _io_bs_wadr_bits_mask_T_39 | _io_bs_wadr_bits_mask_T_10; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_41 = _io_bs_wadr_bits_mask_T_40 | _io_bs_wadr_bits_mask_T_11; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_42 = _io_bs_wadr_bits_mask_T_41 | _io_bs_wadr_bits_mask_T_12; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_43 = _io_bs_wadr_bits_mask_T_42 | _io_bs_wadr_bits_mask_T_13; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_44 = _io_bs_wadr_bits_mask_T_43 | _io_bs_wadr_bits_mask_T_14; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_45 = _io_bs_wadr_bits_mask_T_44 | _io_bs_wadr_bits_mask_T_15; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_46 = _io_bs_wadr_bits_mask_T_16 | _io_bs_wadr_bits_mask_T_17; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_47 = _io_bs_wadr_bits_mask_T_46 | _io_bs_wadr_bits_mask_T_18; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_48 = _io_bs_wadr_bits_mask_T_47 | _io_bs_wadr_bits_mask_T_19; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_49 = _io_bs_wadr_bits_mask_T_48 | _io_bs_wadr_bits_mask_T_20; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_50 = _io_bs_wadr_bits_mask_T_49 | _io_bs_wadr_bits_mask_T_21; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_51 = _io_bs_wadr_bits_mask_T_50 | _io_bs_wadr_bits_mask_T_22; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_52 = _io_bs_wadr_bits_mask_T_51 | _io_bs_wadr_bits_mask_T_23; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_53 = _io_bs_wadr_bits_mask_T_24 | _io_bs_wadr_bits_mask_T_25; // 
@[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_54 = _io_bs_wadr_bits_mask_T_53 | _io_bs_wadr_bits_mask_T_26; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_55 = _io_bs_wadr_bits_mask_T_54 | _io_bs_wadr_bits_mask_T_27; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_56 = _io_bs_wadr_bits_mask_T_55 | _io_bs_wadr_bits_mask_T_28; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_57 = _io_bs_wadr_bits_mask_T_56 | _io_bs_wadr_bits_mask_T_29; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_58 = _io_bs_wadr_bits_mask_T_57 | _io_bs_wadr_bits_mask_T_30; // @[SourceD.scala:275:{45,87}] wire _io_bs_wadr_bits_mask_T_59 = _io_bs_wadr_bits_mask_T_58 | _io_bs_wadr_bits_mask_T_31; // @[SourceD.scala:275:{45,87}] wire [1:0] io_bs_wadr_bits_mask_lo = {_io_bs_wadr_bits_mask_T_45, _io_bs_wadr_bits_mask_T_38}; // @[SourceD.scala:275:{30,87}] wire [1:0] io_bs_wadr_bits_mask_hi = {_io_bs_wadr_bits_mask_T_59, _io_bs_wadr_bits_mask_T_52}; // @[SourceD.scala:275:{30,87}] assign _io_bs_wadr_bits_mask_T_60 = {io_bs_wadr_bits_mask_hi, io_bs_wadr_bits_mask_lo}; // @[SourceD.scala:275:30] assign io_bs_wadr_bits_mask_0 = _io_bs_wadr_bits_mask_T_60; // @[SourceD.scala:48:7, :275:30]
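The last block of generated logic above OR-reduces the 32-bit per-byte mask s4_pdata_mask into the 4-bit io_bs_wadr_bits_mask, one bit per 8-byte group of the 256-bit beat (byte-mask bits 0-7 feed output bit 0, bits 8-15 feed bit 1, and so on). A minimal Chisel sketch of that kind of grouped OR-reduction is shown below; the module and signal names are hypothetical and this is illustrative only, not taken from SourceD.scala.

import chisel3._
import chisel3.util._

// Illustrative only: reduce a 32-bit per-byte mask to a 4-bit per-word mask,
// ORing each group of eight byte-mask bits into one output bit.
class MaskReduce extends Module {
  val io = IO(new Bundle {
    val byteMask = Input(UInt(32.W))  // one bit per byte of a 256-bit beat
    val wordMask = Output(UInt(4.W))  // one bit per 64-bit word
  })
  val groups = io.byteMask.asBools.grouped(8).toSeq
  io.wordMask := VecInit(groups.map(g => g.reduce(_ || _))).asUInt
}

When elaborated, each group reduction unrolls into the flat OR chains visible in the generated Verilog above.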
Generate the Verilog code corresponding to the following Chisel files. File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v4.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v4.common.{MicroOp} import boom.v4.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = { return apply(brupdate, flush, uop.br_mask) } def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush } def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = { return apply(brupdate, flush, bundle.uop) } def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = { return apply(brupdate, flush, bundle.bits) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N} def apply(i: UInt, isel: UInt): UInt = { val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i) val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } object IsYoungerMask { def apply(i: UInt, head: UInt, n: Integer): UInt = { val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0)) val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0)) Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0) } } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v4.common.BoomModule()(p) with boom.v4.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) if (fastDeq && entries > 1) { // Pipeline dequeue selection so the mux gets an entire cycle val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false)) val out_reg = Reg(gen) val out_valid = RegInit(false.B) val out_uop = Reg(new MicroOp) main.io.enq <> io.enq main.io.brupdate := io.brupdate main.io.flush := io.flush io.empty := main.io.empty && !out_valid io.count := main.io.count + out_valid io.deq.valid := out_valid io.deq.bits := out_reg io.deq.bits.uop := out_uop out_uop := UpdateBrMask(io.brupdate, out_uop) out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop)) main.io.deq.ready := false.B when (io.deq.fire || !out_valid) { out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop)) out_reg := main.io.deq.bits out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop) main.io.deq.ready := true.B } } else { val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire && 
!IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop))) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) io.deq.bits := out val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param 
fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) * * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } } class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v4.common.BoomModule()(p) with boom.v4.common.HasBoomCoreParameters { val io = IO(new Bundle { val req = Input(Valid(gen)) val flush = Input(Bool()) val brupdate = Input(new BrUpdateInfo) val resp = Output(Vec(stages, Valid(gen))) }) require(stages > 0) val uops = Reg(Vec(stages, Valid(gen))) uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits) uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits) for (i <- 1 until stages) { uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits) uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits) } for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } } io.resp := uops }
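Before the generated module below, here is a minimal usage sketch for two of the helpers defined in util.scala above: WrapInc advances a circular pointer (wrapping back to 0 after n-1), and IsOlder compares two queue indices relative to the current head. The module name and ports are hypothetical; this is illustrative only and not part of the BOOM sources.

import chisel3._
import chisel3.util._
import boom.v4.util.{WrapInc, IsOlder}

// Illustrative only: maintain a circular head pointer with WrapInc and
// compare entry ages against it with IsOlder.
class PointerAgeExample(entries: Int) extends Module {
  require(entries > 1)
  val io = IO(new Bundle {
    val advance = Input(Bool())
    val idxA    = Input(UInt(log2Ceil(entries).W))
    val idxB    = Input(UInt(log2Ceil(entries).W))
    val aOlder  = Output(Bool())
  })
  val head = RegInit(0.U(log2Ceil(entries).W))
  when (io.advance) { head := WrapInc(head, entries) }  // wraps to 0 after entries-1
  io.aOlder := IsOlder(io.idxA, io.idxB, head)          // true if idxA was allocated earlier than idxB
}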
module BranchKillableQueue_3( // @[util.scala:458:7] input clock, // @[util.scala:458:7] input reset, // @[util.scala:458:7] output io_enq_ready, // @[util.scala:463:14] input io_enq_valid, // @[util.scala:463:14] input [31:0] io_enq_bits_uop_inst, // @[util.scala:463:14] input [31:0] io_enq_bits_uop_debug_inst, // @[util.scala:463:14] input io_enq_bits_uop_is_rvc, // @[util.scala:463:14] input [33:0] io_enq_bits_uop_debug_pc, // @[util.scala:463:14] input io_enq_bits_uop_iq_type_0, // @[util.scala:463:14] input io_enq_bits_uop_iq_type_1, // @[util.scala:463:14] input io_enq_bits_uop_iq_type_2, // @[util.scala:463:14] input io_enq_bits_uop_iq_type_3, // @[util.scala:463:14] input io_enq_bits_uop_fu_code_0, // @[util.scala:463:14] input io_enq_bits_uop_fu_code_1, // @[util.scala:463:14] input io_enq_bits_uop_fu_code_2, // @[util.scala:463:14] input io_enq_bits_uop_fu_code_3, // @[util.scala:463:14] input io_enq_bits_uop_fu_code_4, // @[util.scala:463:14] input io_enq_bits_uop_fu_code_5, // @[util.scala:463:14] input io_enq_bits_uop_fu_code_6, // @[util.scala:463:14] input io_enq_bits_uop_fu_code_7, // @[util.scala:463:14] input io_enq_bits_uop_fu_code_8, // @[util.scala:463:14] input io_enq_bits_uop_fu_code_9, // @[util.scala:463:14] input io_enq_bits_uop_iw_issued, // @[util.scala:463:14] input io_enq_bits_uop_iw_issued_partial_agen, // @[util.scala:463:14] input io_enq_bits_uop_iw_issued_partial_dgen, // @[util.scala:463:14] input io_enq_bits_uop_iw_p1_speculative_child, // @[util.scala:463:14] input io_enq_bits_uop_iw_p2_speculative_child, // @[util.scala:463:14] input io_enq_bits_uop_iw_p1_bypass_hint, // @[util.scala:463:14] input io_enq_bits_uop_iw_p2_bypass_hint, // @[util.scala:463:14] input io_enq_bits_uop_iw_p3_bypass_hint, // @[util.scala:463:14] input io_enq_bits_uop_dis_col_sel, // @[util.scala:463:14] input [3:0] io_enq_bits_uop_br_mask, // @[util.scala:463:14] input [1:0] io_enq_bits_uop_br_tag, // @[util.scala:463:14] input [3:0] io_enq_bits_uop_br_type, // @[util.scala:463:14] input io_enq_bits_uop_is_sfb, // @[util.scala:463:14] input io_enq_bits_uop_is_fence, // @[util.scala:463:14] input io_enq_bits_uop_is_fencei, // @[util.scala:463:14] input io_enq_bits_uop_is_sfence, // @[util.scala:463:14] input io_enq_bits_uop_is_amo, // @[util.scala:463:14] input io_enq_bits_uop_is_eret, // @[util.scala:463:14] input io_enq_bits_uop_is_sys_pc2epc, // @[util.scala:463:14] input io_enq_bits_uop_is_rocc, // @[util.scala:463:14] input io_enq_bits_uop_is_mov, // @[util.scala:463:14] input [3:0] io_enq_bits_uop_ftq_idx, // @[util.scala:463:14] input io_enq_bits_uop_edge_inst, // @[util.scala:463:14] input [5:0] io_enq_bits_uop_pc_lob, // @[util.scala:463:14] input io_enq_bits_uop_taken, // @[util.scala:463:14] input io_enq_bits_uop_imm_rename, // @[util.scala:463:14] input [2:0] io_enq_bits_uop_imm_sel, // @[util.scala:463:14] input [4:0] io_enq_bits_uop_pimm, // @[util.scala:463:14] input [19:0] io_enq_bits_uop_imm_packed, // @[util.scala:463:14] input [1:0] io_enq_bits_uop_op1_sel, // @[util.scala:463:14] input [2:0] io_enq_bits_uop_op2_sel, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_ldst, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_wen, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_ren1, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_ren2, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_ren3, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_swap12, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_swap23, // @[util.scala:463:14] input 
[1:0] io_enq_bits_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14] input [1:0] io_enq_bits_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_fromint, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_toint, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_fastpipe, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_fma, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_div, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_sqrt, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_wflags, // @[util.scala:463:14] input io_enq_bits_uop_fp_ctrl_vec, // @[util.scala:463:14] input [4:0] io_enq_bits_uop_rob_idx, // @[util.scala:463:14] input [3:0] io_enq_bits_uop_ldq_idx, // @[util.scala:463:14] input [3:0] io_enq_bits_uop_stq_idx, // @[util.scala:463:14] input [1:0] io_enq_bits_uop_rxq_idx, // @[util.scala:463:14] input [5:0] io_enq_bits_uop_pdst, // @[util.scala:463:14] input [5:0] io_enq_bits_uop_prs1, // @[util.scala:463:14] input [5:0] io_enq_bits_uop_prs2, // @[util.scala:463:14] input [5:0] io_enq_bits_uop_prs3, // @[util.scala:463:14] input [3:0] io_enq_bits_uop_ppred, // @[util.scala:463:14] input io_enq_bits_uop_prs1_busy, // @[util.scala:463:14] input io_enq_bits_uop_prs2_busy, // @[util.scala:463:14] input io_enq_bits_uop_prs3_busy, // @[util.scala:463:14] input io_enq_bits_uop_ppred_busy, // @[util.scala:463:14] input [5:0] io_enq_bits_uop_stale_pdst, // @[util.scala:463:14] input io_enq_bits_uop_exception, // @[util.scala:463:14] input [63:0] io_enq_bits_uop_exc_cause, // @[util.scala:463:14] input [4:0] io_enq_bits_uop_mem_cmd, // @[util.scala:463:14] input [1:0] io_enq_bits_uop_mem_size, // @[util.scala:463:14] input io_enq_bits_uop_mem_signed, // @[util.scala:463:14] input io_enq_bits_uop_uses_ldq, // @[util.scala:463:14] input io_enq_bits_uop_uses_stq, // @[util.scala:463:14] input io_enq_bits_uop_is_unique, // @[util.scala:463:14] input io_enq_bits_uop_flush_on_commit, // @[util.scala:463:14] input [2:0] io_enq_bits_uop_csr_cmd, // @[util.scala:463:14] input io_enq_bits_uop_ldst_is_rs1, // @[util.scala:463:14] input [5:0] io_enq_bits_uop_ldst, // @[util.scala:463:14] input [5:0] io_enq_bits_uop_lrs1, // @[util.scala:463:14] input [5:0] io_enq_bits_uop_lrs2, // @[util.scala:463:14] input [5:0] io_enq_bits_uop_lrs3, // @[util.scala:463:14] input [1:0] io_enq_bits_uop_dst_rtype, // @[util.scala:463:14] input [1:0] io_enq_bits_uop_lrs1_rtype, // @[util.scala:463:14] input [1:0] io_enq_bits_uop_lrs2_rtype, // @[util.scala:463:14] input io_enq_bits_uop_frs3_en, // @[util.scala:463:14] input io_enq_bits_uop_fcn_dw, // @[util.scala:463:14] input [4:0] io_enq_bits_uop_fcn_op, // @[util.scala:463:14] input io_enq_bits_uop_fp_val, // @[util.scala:463:14] input [2:0] io_enq_bits_uop_fp_rm, // @[util.scala:463:14] input [1:0] io_enq_bits_uop_fp_typ, // @[util.scala:463:14] input io_enq_bits_uop_xcpt_pf_if, // @[util.scala:463:14] input io_enq_bits_uop_xcpt_ae_if, // @[util.scala:463:14] input io_enq_bits_uop_xcpt_ma_if, // @[util.scala:463:14] input io_enq_bits_uop_bp_debug_if, // @[util.scala:463:14] input io_enq_bits_uop_bp_xcpt_if, // @[util.scala:463:14] input [2:0] io_enq_bits_uop_debug_fsrc, // @[util.scala:463:14] input [2:0] io_enq_bits_uop_debug_tsrc, // @[util.scala:463:14] input [33:0] io_enq_bits_addr, // @[util.scala:463:14] input [63:0] io_enq_bits_data, // @[util.scala:463:14] input io_enq_bits_is_hella, // @[util.scala:463:14] input io_enq_bits_tag_match, // @[util.scala:463:14] input [1:0] io_enq_bits_old_meta_coh_state, // 
@[util.scala:463:14] input [21:0] io_enq_bits_old_meta_tag, // @[util.scala:463:14] input [1:0] io_enq_bits_way_en, // @[util.scala:463:14] input [4:0] io_enq_bits_sdq_id, // @[util.scala:463:14] input io_deq_ready, // @[util.scala:463:14] output io_deq_valid, // @[util.scala:463:14] output [31:0] io_deq_bits_uop_inst, // @[util.scala:463:14] output [31:0] io_deq_bits_uop_debug_inst, // @[util.scala:463:14] output io_deq_bits_uop_is_rvc, // @[util.scala:463:14] output [33:0] io_deq_bits_uop_debug_pc, // @[util.scala:463:14] output io_deq_bits_uop_iq_type_0, // @[util.scala:463:14] output io_deq_bits_uop_iq_type_1, // @[util.scala:463:14] output io_deq_bits_uop_iq_type_2, // @[util.scala:463:14] output io_deq_bits_uop_iq_type_3, // @[util.scala:463:14] output io_deq_bits_uop_fu_code_0, // @[util.scala:463:14] output io_deq_bits_uop_fu_code_1, // @[util.scala:463:14] output io_deq_bits_uop_fu_code_2, // @[util.scala:463:14] output io_deq_bits_uop_fu_code_3, // @[util.scala:463:14] output io_deq_bits_uop_fu_code_4, // @[util.scala:463:14] output io_deq_bits_uop_fu_code_5, // @[util.scala:463:14] output io_deq_bits_uop_fu_code_6, // @[util.scala:463:14] output io_deq_bits_uop_fu_code_7, // @[util.scala:463:14] output io_deq_bits_uop_fu_code_8, // @[util.scala:463:14] output io_deq_bits_uop_fu_code_9, // @[util.scala:463:14] output io_deq_bits_uop_iw_issued, // @[util.scala:463:14] output io_deq_bits_uop_iw_issued_partial_agen, // @[util.scala:463:14] output io_deq_bits_uop_iw_issued_partial_dgen, // @[util.scala:463:14] output io_deq_bits_uop_iw_p1_speculative_child, // @[util.scala:463:14] output io_deq_bits_uop_iw_p2_speculative_child, // @[util.scala:463:14] output io_deq_bits_uop_iw_p1_bypass_hint, // @[util.scala:463:14] output io_deq_bits_uop_iw_p2_bypass_hint, // @[util.scala:463:14] output io_deq_bits_uop_iw_p3_bypass_hint, // @[util.scala:463:14] output io_deq_bits_uop_dis_col_sel, // @[util.scala:463:14] output [3:0] io_deq_bits_uop_br_mask, // @[util.scala:463:14] output [1:0] io_deq_bits_uop_br_tag, // @[util.scala:463:14] output [3:0] io_deq_bits_uop_br_type, // @[util.scala:463:14] output io_deq_bits_uop_is_sfb, // @[util.scala:463:14] output io_deq_bits_uop_is_fence, // @[util.scala:463:14] output io_deq_bits_uop_is_fencei, // @[util.scala:463:14] output io_deq_bits_uop_is_sfence, // @[util.scala:463:14] output io_deq_bits_uop_is_amo, // @[util.scala:463:14] output io_deq_bits_uop_is_eret, // @[util.scala:463:14] output io_deq_bits_uop_is_sys_pc2epc, // @[util.scala:463:14] output io_deq_bits_uop_is_rocc, // @[util.scala:463:14] output io_deq_bits_uop_is_mov, // @[util.scala:463:14] output [3:0] io_deq_bits_uop_ftq_idx, // @[util.scala:463:14] output io_deq_bits_uop_edge_inst, // @[util.scala:463:14] output [5:0] io_deq_bits_uop_pc_lob, // @[util.scala:463:14] output io_deq_bits_uop_taken, // @[util.scala:463:14] output io_deq_bits_uop_imm_rename, // @[util.scala:463:14] output [2:0] io_deq_bits_uop_imm_sel, // @[util.scala:463:14] output [4:0] io_deq_bits_uop_pimm, // @[util.scala:463:14] output [19:0] io_deq_bits_uop_imm_packed, // @[util.scala:463:14] output [1:0] io_deq_bits_uop_op1_sel, // @[util.scala:463:14] output [2:0] io_deq_bits_uop_op2_sel, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_ldst, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_wen, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_ren1, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_ren2, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_ren3, // @[util.scala:463:14] 
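  // --- Editor's note (descriptive comment added for readability; not part of the generated output) ---
  // The io_deq_* ports that follow mirror the io_enq_* MicroOp fields declared above:
  // the queue stores each enqueued uop/addr/data entry and presents the same fields on
  // dequeue, apart from the branch-mask handling wires that appear later in the module body.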
output io_deq_bits_uop_fp_ctrl_swap12, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_swap23, // @[util.scala:463:14] output [1:0] io_deq_bits_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14] output [1:0] io_deq_bits_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_fromint, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_toint, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_fastpipe, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_fma, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_div, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_sqrt, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_wflags, // @[util.scala:463:14] output io_deq_bits_uop_fp_ctrl_vec, // @[util.scala:463:14] output [4:0] io_deq_bits_uop_rob_idx, // @[util.scala:463:14] output [3:0] io_deq_bits_uop_ldq_idx, // @[util.scala:463:14] output [3:0] io_deq_bits_uop_stq_idx, // @[util.scala:463:14] output [1:0] io_deq_bits_uop_rxq_idx, // @[util.scala:463:14] output [5:0] io_deq_bits_uop_pdst, // @[util.scala:463:14] output [5:0] io_deq_bits_uop_prs1, // @[util.scala:463:14] output [5:0] io_deq_bits_uop_prs2, // @[util.scala:463:14] output [5:0] io_deq_bits_uop_prs3, // @[util.scala:463:14] output [3:0] io_deq_bits_uop_ppred, // @[util.scala:463:14] output io_deq_bits_uop_prs1_busy, // @[util.scala:463:14] output io_deq_bits_uop_prs2_busy, // @[util.scala:463:14] output io_deq_bits_uop_prs3_busy, // @[util.scala:463:14] output io_deq_bits_uop_ppred_busy, // @[util.scala:463:14] output [5:0] io_deq_bits_uop_stale_pdst, // @[util.scala:463:14] output io_deq_bits_uop_exception, // @[util.scala:463:14] output [63:0] io_deq_bits_uop_exc_cause, // @[util.scala:463:14] output [4:0] io_deq_bits_uop_mem_cmd, // @[util.scala:463:14] output [1:0] io_deq_bits_uop_mem_size, // @[util.scala:463:14] output io_deq_bits_uop_mem_signed, // @[util.scala:463:14] output io_deq_bits_uop_uses_ldq, // @[util.scala:463:14] output io_deq_bits_uop_uses_stq, // @[util.scala:463:14] output io_deq_bits_uop_is_unique, // @[util.scala:463:14] output io_deq_bits_uop_flush_on_commit, // @[util.scala:463:14] output [2:0] io_deq_bits_uop_csr_cmd, // @[util.scala:463:14] output io_deq_bits_uop_ldst_is_rs1, // @[util.scala:463:14] output [5:0] io_deq_bits_uop_ldst, // @[util.scala:463:14] output [5:0] io_deq_bits_uop_lrs1, // @[util.scala:463:14] output [5:0] io_deq_bits_uop_lrs2, // @[util.scala:463:14] output [5:0] io_deq_bits_uop_lrs3, // @[util.scala:463:14] output [1:0] io_deq_bits_uop_dst_rtype, // @[util.scala:463:14] output [1:0] io_deq_bits_uop_lrs1_rtype, // @[util.scala:463:14] output [1:0] io_deq_bits_uop_lrs2_rtype, // @[util.scala:463:14] output io_deq_bits_uop_frs3_en, // @[util.scala:463:14] output io_deq_bits_uop_fcn_dw, // @[util.scala:463:14] output [4:0] io_deq_bits_uop_fcn_op, // @[util.scala:463:14] output io_deq_bits_uop_fp_val, // @[util.scala:463:14] output [2:0] io_deq_bits_uop_fp_rm, // @[util.scala:463:14] output [1:0] io_deq_bits_uop_fp_typ, // @[util.scala:463:14] output io_deq_bits_uop_xcpt_pf_if, // @[util.scala:463:14] output io_deq_bits_uop_xcpt_ae_if, // @[util.scala:463:14] output io_deq_bits_uop_xcpt_ma_if, // @[util.scala:463:14] output io_deq_bits_uop_bp_debug_if, // @[util.scala:463:14] output io_deq_bits_uop_bp_xcpt_if, // @[util.scala:463:14] output [2:0] io_deq_bits_uop_debug_fsrc, // @[util.scala:463:14] output [2:0] io_deq_bits_uop_debug_tsrc, // @[util.scala:463:14] output [33:0] io_deq_bits_addr, // @[util.scala:463:14] output [63:0] 
io_deq_bits_data, // @[util.scala:463:14] output io_deq_bits_is_hella, // @[util.scala:463:14] output io_deq_bits_tag_match, // @[util.scala:463:14] output [1:0] io_deq_bits_old_meta_coh_state, // @[util.scala:463:14] output [21:0] io_deq_bits_old_meta_tag, // @[util.scala:463:14] output [1:0] io_deq_bits_way_en, // @[util.scala:463:14] output [4:0] io_deq_bits_sdq_id, // @[util.scala:463:14] output io_empty // @[util.scala:463:14] ); wire _out_valid_T_12; // @[util.scala:496:38] wire [31:0] _main_io_deq_bits_uop_inst; // @[util.scala:476:22] wire [31:0] _main_io_deq_bits_uop_debug_inst; // @[util.scala:476:22] wire _main_io_deq_bits_uop_is_rvc; // @[util.scala:476:22] wire [33:0] _main_io_deq_bits_uop_debug_pc; // @[util.scala:476:22] wire _main_io_deq_bits_uop_iq_type_0; // @[util.scala:476:22] wire _main_io_deq_bits_uop_iq_type_1; // @[util.scala:476:22] wire _main_io_deq_bits_uop_iq_type_2; // @[util.scala:476:22] wire _main_io_deq_bits_uop_iq_type_3; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fu_code_0; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fu_code_1; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fu_code_2; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fu_code_3; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fu_code_4; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fu_code_5; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fu_code_6; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fu_code_7; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fu_code_8; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fu_code_9; // @[util.scala:476:22] wire _main_io_deq_bits_uop_iw_issued; // @[util.scala:476:22] wire _main_io_deq_bits_uop_iw_issued_partial_agen; // @[util.scala:476:22] wire _main_io_deq_bits_uop_iw_issued_partial_dgen; // @[util.scala:476:22] wire _main_io_deq_bits_uop_iw_p1_speculative_child; // @[util.scala:476:22] wire _main_io_deq_bits_uop_iw_p2_speculative_child; // @[util.scala:476:22] wire _main_io_deq_bits_uop_iw_p1_bypass_hint; // @[util.scala:476:22] wire _main_io_deq_bits_uop_iw_p2_bypass_hint; // @[util.scala:476:22] wire _main_io_deq_bits_uop_iw_p3_bypass_hint; // @[util.scala:476:22] wire _main_io_deq_bits_uop_dis_col_sel; // @[util.scala:476:22] wire [3:0] _main_io_deq_bits_uop_br_mask; // @[util.scala:476:22] wire [1:0] _main_io_deq_bits_uop_br_tag; // @[util.scala:476:22] wire [3:0] _main_io_deq_bits_uop_br_type; // @[util.scala:476:22] wire _main_io_deq_bits_uop_is_sfb; // @[util.scala:476:22] wire _main_io_deq_bits_uop_is_fence; // @[util.scala:476:22] wire _main_io_deq_bits_uop_is_fencei; // @[util.scala:476:22] wire _main_io_deq_bits_uop_is_sfence; // @[util.scala:476:22] wire _main_io_deq_bits_uop_is_amo; // @[util.scala:476:22] wire _main_io_deq_bits_uop_is_eret; // @[util.scala:476:22] wire _main_io_deq_bits_uop_is_sys_pc2epc; // @[util.scala:476:22] wire _main_io_deq_bits_uop_is_rocc; // @[util.scala:476:22] wire _main_io_deq_bits_uop_is_mov; // @[util.scala:476:22] wire [3:0] _main_io_deq_bits_uop_ftq_idx; // @[util.scala:476:22] wire _main_io_deq_bits_uop_edge_inst; // @[util.scala:476:22] wire [5:0] _main_io_deq_bits_uop_pc_lob; // @[util.scala:476:22] wire _main_io_deq_bits_uop_taken; // @[util.scala:476:22] wire _main_io_deq_bits_uop_imm_rename; // @[util.scala:476:22] wire [2:0] _main_io_deq_bits_uop_imm_sel; // @[util.scala:476:22] wire [4:0] _main_io_deq_bits_uop_pimm; // @[util.scala:476:22] wire [19:0] _main_io_deq_bits_uop_imm_packed; // @[util.scala:476:22] wire [1:0] _main_io_deq_bits_uop_op1_sel; // 
@[util.scala:476:22] wire [2:0] _main_io_deq_bits_uop_op2_sel; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_ldst; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_wen; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_ren1; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_ren2; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_ren3; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_swap12; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_swap23; // @[util.scala:476:22] wire [1:0] _main_io_deq_bits_uop_fp_ctrl_typeTagIn; // @[util.scala:476:22] wire [1:0] _main_io_deq_bits_uop_fp_ctrl_typeTagOut; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_fromint; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_toint; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_fastpipe; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_fma; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_div; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_sqrt; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_wflags; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_ctrl_vec; // @[util.scala:476:22] wire [4:0] _main_io_deq_bits_uop_rob_idx; // @[util.scala:476:22] wire [3:0] _main_io_deq_bits_uop_ldq_idx; // @[util.scala:476:22] wire [3:0] _main_io_deq_bits_uop_stq_idx; // @[util.scala:476:22] wire [1:0] _main_io_deq_bits_uop_rxq_idx; // @[util.scala:476:22] wire [5:0] _main_io_deq_bits_uop_pdst; // @[util.scala:476:22] wire [5:0] _main_io_deq_bits_uop_prs1; // @[util.scala:476:22] wire [5:0] _main_io_deq_bits_uop_prs2; // @[util.scala:476:22] wire [5:0] _main_io_deq_bits_uop_prs3; // @[util.scala:476:22] wire [3:0] _main_io_deq_bits_uop_ppred; // @[util.scala:476:22] wire _main_io_deq_bits_uop_prs1_busy; // @[util.scala:476:22] wire _main_io_deq_bits_uop_prs2_busy; // @[util.scala:476:22] wire _main_io_deq_bits_uop_prs3_busy; // @[util.scala:476:22] wire _main_io_deq_bits_uop_ppred_busy; // @[util.scala:476:22] wire [5:0] _main_io_deq_bits_uop_stale_pdst; // @[util.scala:476:22] wire _main_io_deq_bits_uop_exception; // @[util.scala:476:22] wire [63:0] _main_io_deq_bits_uop_exc_cause; // @[util.scala:476:22] wire [4:0] _main_io_deq_bits_uop_mem_cmd; // @[util.scala:476:22] wire [1:0] _main_io_deq_bits_uop_mem_size; // @[util.scala:476:22] wire _main_io_deq_bits_uop_mem_signed; // @[util.scala:476:22] wire _main_io_deq_bits_uop_uses_ldq; // @[util.scala:476:22] wire _main_io_deq_bits_uop_uses_stq; // @[util.scala:476:22] wire _main_io_deq_bits_uop_is_unique; // @[util.scala:476:22] wire _main_io_deq_bits_uop_flush_on_commit; // @[util.scala:476:22] wire [2:0] _main_io_deq_bits_uop_csr_cmd; // @[util.scala:476:22] wire _main_io_deq_bits_uop_ldst_is_rs1; // @[util.scala:476:22] wire [5:0] _main_io_deq_bits_uop_ldst; // @[util.scala:476:22] wire [5:0] _main_io_deq_bits_uop_lrs1; // @[util.scala:476:22] wire [5:0] _main_io_deq_bits_uop_lrs2; // @[util.scala:476:22] wire [5:0] _main_io_deq_bits_uop_lrs3; // @[util.scala:476:22] wire [1:0] _main_io_deq_bits_uop_dst_rtype; // @[util.scala:476:22] wire [1:0] _main_io_deq_bits_uop_lrs1_rtype; // @[util.scala:476:22] wire [1:0] _main_io_deq_bits_uop_lrs2_rtype; // @[util.scala:476:22] wire _main_io_deq_bits_uop_frs3_en; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fcn_dw; // @[util.scala:476:22] wire [4:0] _main_io_deq_bits_uop_fcn_op; // @[util.scala:476:22] wire _main_io_deq_bits_uop_fp_val; // @[util.scala:476:22] wire [2:0] 
_main_io_deq_bits_uop_fp_rm; // @[util.scala:476:22] wire [1:0] _main_io_deq_bits_uop_fp_typ; // @[util.scala:476:22] wire _main_io_deq_bits_uop_xcpt_pf_if; // @[util.scala:476:22] wire _main_io_deq_bits_uop_xcpt_ae_if; // @[util.scala:476:22] wire _main_io_deq_bits_uop_xcpt_ma_if; // @[util.scala:476:22] wire _main_io_deq_bits_uop_bp_debug_if; // @[util.scala:476:22] wire _main_io_deq_bits_uop_bp_xcpt_if; // @[util.scala:476:22] wire [2:0] _main_io_deq_bits_uop_debug_fsrc; // @[util.scala:476:22] wire [2:0] _main_io_deq_bits_uop_debug_tsrc; // @[util.scala:476:22] wire [33:0] _main_io_deq_bits_addr; // @[util.scala:476:22] wire [63:0] _main_io_deq_bits_data; // @[util.scala:476:22] wire _main_io_deq_bits_is_hella; // @[util.scala:476:22] wire _main_io_deq_bits_tag_match; // @[util.scala:476:22] wire [1:0] _main_io_deq_bits_old_meta_coh_state; // @[util.scala:476:22] wire [21:0] _main_io_deq_bits_old_meta_tag; // @[util.scala:476:22] wire [1:0] _main_io_deq_bits_way_en; // @[util.scala:476:22] wire [4:0] _main_io_deq_bits_sdq_id; // @[util.scala:476:22] wire _main_io_empty; // @[util.scala:476:22] wire [3:0] _main_io_count; // @[util.scala:476:22] wire io_enq_valid_0 = io_enq_valid; // @[util.scala:458:7] wire [31:0] io_enq_bits_uop_inst_0 = io_enq_bits_uop_inst; // @[util.scala:458:7] wire [31:0] io_enq_bits_uop_debug_inst_0 = io_enq_bits_uop_debug_inst; // @[util.scala:458:7] wire io_enq_bits_uop_is_rvc_0 = io_enq_bits_uop_is_rvc; // @[util.scala:458:7] wire [33:0] io_enq_bits_uop_debug_pc_0 = io_enq_bits_uop_debug_pc; // @[util.scala:458:7] wire io_enq_bits_uop_iq_type_0_0 = io_enq_bits_uop_iq_type_0; // @[util.scala:458:7] wire io_enq_bits_uop_iq_type_1_0 = io_enq_bits_uop_iq_type_1; // @[util.scala:458:7] wire io_enq_bits_uop_iq_type_2_0 = io_enq_bits_uop_iq_type_2; // @[util.scala:458:7] wire io_enq_bits_uop_iq_type_3_0 = io_enq_bits_uop_iq_type_3; // @[util.scala:458:7] wire io_enq_bits_uop_fu_code_0_0 = io_enq_bits_uop_fu_code_0; // @[util.scala:458:7] wire io_enq_bits_uop_fu_code_1_0 = io_enq_bits_uop_fu_code_1; // @[util.scala:458:7] wire io_enq_bits_uop_fu_code_2_0 = io_enq_bits_uop_fu_code_2; // @[util.scala:458:7] wire io_enq_bits_uop_fu_code_3_0 = io_enq_bits_uop_fu_code_3; // @[util.scala:458:7] wire io_enq_bits_uop_fu_code_4_0 = io_enq_bits_uop_fu_code_4; // @[util.scala:458:7] wire io_enq_bits_uop_fu_code_5_0 = io_enq_bits_uop_fu_code_5; // @[util.scala:458:7] wire io_enq_bits_uop_fu_code_6_0 = io_enq_bits_uop_fu_code_6; // @[util.scala:458:7] wire io_enq_bits_uop_fu_code_7_0 = io_enq_bits_uop_fu_code_7; // @[util.scala:458:7] wire io_enq_bits_uop_fu_code_8_0 = io_enq_bits_uop_fu_code_8; // @[util.scala:458:7] wire io_enq_bits_uop_fu_code_9_0 = io_enq_bits_uop_fu_code_9; // @[util.scala:458:7] wire io_enq_bits_uop_iw_issued_0 = io_enq_bits_uop_iw_issued; // @[util.scala:458:7] wire io_enq_bits_uop_iw_issued_partial_agen_0 = io_enq_bits_uop_iw_issued_partial_agen; // @[util.scala:458:7] wire io_enq_bits_uop_iw_issued_partial_dgen_0 = io_enq_bits_uop_iw_issued_partial_dgen; // @[util.scala:458:7] wire io_enq_bits_uop_iw_p1_speculative_child_0 = io_enq_bits_uop_iw_p1_speculative_child; // @[util.scala:458:7] wire io_enq_bits_uop_iw_p2_speculative_child_0 = io_enq_bits_uop_iw_p2_speculative_child; // @[util.scala:458:7] wire io_enq_bits_uop_iw_p1_bypass_hint_0 = io_enq_bits_uop_iw_p1_bypass_hint; // @[util.scala:458:7] wire io_enq_bits_uop_iw_p2_bypass_hint_0 = io_enq_bits_uop_iw_p2_bypass_hint; // @[util.scala:458:7] wire io_enq_bits_uop_iw_p3_bypass_hint_0 = 
io_enq_bits_uop_iw_p3_bypass_hint; // @[util.scala:458:7] wire io_enq_bits_uop_dis_col_sel_0 = io_enq_bits_uop_dis_col_sel; // @[util.scala:458:7] wire [3:0] io_enq_bits_uop_br_mask_0 = io_enq_bits_uop_br_mask; // @[util.scala:458:7] wire [1:0] io_enq_bits_uop_br_tag_0 = io_enq_bits_uop_br_tag; // @[util.scala:458:7] wire [3:0] io_enq_bits_uop_br_type_0 = io_enq_bits_uop_br_type; // @[util.scala:458:7] wire io_enq_bits_uop_is_sfb_0 = io_enq_bits_uop_is_sfb; // @[util.scala:458:7] wire io_enq_bits_uop_is_fence_0 = io_enq_bits_uop_is_fence; // @[util.scala:458:7] wire io_enq_bits_uop_is_fencei_0 = io_enq_bits_uop_is_fencei; // @[util.scala:458:7] wire io_enq_bits_uop_is_sfence_0 = io_enq_bits_uop_is_sfence; // @[util.scala:458:7] wire io_enq_bits_uop_is_amo_0 = io_enq_bits_uop_is_amo; // @[util.scala:458:7] wire io_enq_bits_uop_is_eret_0 = io_enq_bits_uop_is_eret; // @[util.scala:458:7] wire io_enq_bits_uop_is_sys_pc2epc_0 = io_enq_bits_uop_is_sys_pc2epc; // @[util.scala:458:7] wire io_enq_bits_uop_is_rocc_0 = io_enq_bits_uop_is_rocc; // @[util.scala:458:7] wire io_enq_bits_uop_is_mov_0 = io_enq_bits_uop_is_mov; // @[util.scala:458:7] wire [3:0] io_enq_bits_uop_ftq_idx_0 = io_enq_bits_uop_ftq_idx; // @[util.scala:458:7] wire io_enq_bits_uop_edge_inst_0 = io_enq_bits_uop_edge_inst; // @[util.scala:458:7] wire [5:0] io_enq_bits_uop_pc_lob_0 = io_enq_bits_uop_pc_lob; // @[util.scala:458:7] wire io_enq_bits_uop_taken_0 = io_enq_bits_uop_taken; // @[util.scala:458:7] wire io_enq_bits_uop_imm_rename_0 = io_enq_bits_uop_imm_rename; // @[util.scala:458:7] wire [2:0] io_enq_bits_uop_imm_sel_0 = io_enq_bits_uop_imm_sel; // @[util.scala:458:7] wire [4:0] io_enq_bits_uop_pimm_0 = io_enq_bits_uop_pimm; // @[util.scala:458:7] wire [19:0] io_enq_bits_uop_imm_packed_0 = io_enq_bits_uop_imm_packed; // @[util.scala:458:7] wire [1:0] io_enq_bits_uop_op1_sel_0 = io_enq_bits_uop_op1_sel; // @[util.scala:458:7] wire [2:0] io_enq_bits_uop_op2_sel_0 = io_enq_bits_uop_op2_sel; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_ldst_0 = io_enq_bits_uop_fp_ctrl_ldst; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_wen_0 = io_enq_bits_uop_fp_ctrl_wen; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_ren1_0 = io_enq_bits_uop_fp_ctrl_ren1; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_ren2_0 = io_enq_bits_uop_fp_ctrl_ren2; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_ren3_0 = io_enq_bits_uop_fp_ctrl_ren3; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_swap12_0 = io_enq_bits_uop_fp_ctrl_swap12; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_swap23_0 = io_enq_bits_uop_fp_ctrl_swap23; // @[util.scala:458:7] wire [1:0] io_enq_bits_uop_fp_ctrl_typeTagIn_0 = io_enq_bits_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7] wire [1:0] io_enq_bits_uop_fp_ctrl_typeTagOut_0 = io_enq_bits_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_fromint_0 = io_enq_bits_uop_fp_ctrl_fromint; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_toint_0 = io_enq_bits_uop_fp_ctrl_toint; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_fastpipe_0 = io_enq_bits_uop_fp_ctrl_fastpipe; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_fma_0 = io_enq_bits_uop_fp_ctrl_fma; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_div_0 = io_enq_bits_uop_fp_ctrl_div; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_sqrt_0 = io_enq_bits_uop_fp_ctrl_sqrt; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_wflags_0 = io_enq_bits_uop_fp_ctrl_wflags; // @[util.scala:458:7] wire io_enq_bits_uop_fp_ctrl_vec_0 
= io_enq_bits_uop_fp_ctrl_vec; // @[util.scala:458:7] wire [4:0] io_enq_bits_uop_rob_idx_0 = io_enq_bits_uop_rob_idx; // @[util.scala:458:7] wire [3:0] io_enq_bits_uop_ldq_idx_0 = io_enq_bits_uop_ldq_idx; // @[util.scala:458:7] wire [3:0] io_enq_bits_uop_stq_idx_0 = io_enq_bits_uop_stq_idx; // @[util.scala:458:7] wire [1:0] io_enq_bits_uop_rxq_idx_0 = io_enq_bits_uop_rxq_idx; // @[util.scala:458:7] wire [5:0] io_enq_bits_uop_pdst_0 = io_enq_bits_uop_pdst; // @[util.scala:458:7] wire [5:0] io_enq_bits_uop_prs1_0 = io_enq_bits_uop_prs1; // @[util.scala:458:7] wire [5:0] io_enq_bits_uop_prs2_0 = io_enq_bits_uop_prs2; // @[util.scala:458:7] wire [5:0] io_enq_bits_uop_prs3_0 = io_enq_bits_uop_prs3; // @[util.scala:458:7] wire [3:0] io_enq_bits_uop_ppred_0 = io_enq_bits_uop_ppred; // @[util.scala:458:7] wire io_enq_bits_uop_prs1_busy_0 = io_enq_bits_uop_prs1_busy; // @[util.scala:458:7] wire io_enq_bits_uop_prs2_busy_0 = io_enq_bits_uop_prs2_busy; // @[util.scala:458:7] wire io_enq_bits_uop_prs3_busy_0 = io_enq_bits_uop_prs3_busy; // @[util.scala:458:7] wire io_enq_bits_uop_ppred_busy_0 = io_enq_bits_uop_ppred_busy; // @[util.scala:458:7] wire [5:0] io_enq_bits_uop_stale_pdst_0 = io_enq_bits_uop_stale_pdst; // @[util.scala:458:7] wire io_enq_bits_uop_exception_0 = io_enq_bits_uop_exception; // @[util.scala:458:7] wire [63:0] io_enq_bits_uop_exc_cause_0 = io_enq_bits_uop_exc_cause; // @[util.scala:458:7] wire [4:0] io_enq_bits_uop_mem_cmd_0 = io_enq_bits_uop_mem_cmd; // @[util.scala:458:7] wire [1:0] io_enq_bits_uop_mem_size_0 = io_enq_bits_uop_mem_size; // @[util.scala:458:7] wire io_enq_bits_uop_mem_signed_0 = io_enq_bits_uop_mem_signed; // @[util.scala:458:7] wire io_enq_bits_uop_uses_ldq_0 = io_enq_bits_uop_uses_ldq; // @[util.scala:458:7] wire io_enq_bits_uop_uses_stq_0 = io_enq_bits_uop_uses_stq; // @[util.scala:458:7] wire io_enq_bits_uop_is_unique_0 = io_enq_bits_uop_is_unique; // @[util.scala:458:7] wire io_enq_bits_uop_flush_on_commit_0 = io_enq_bits_uop_flush_on_commit; // @[util.scala:458:7] wire [2:0] io_enq_bits_uop_csr_cmd_0 = io_enq_bits_uop_csr_cmd; // @[util.scala:458:7] wire io_enq_bits_uop_ldst_is_rs1_0 = io_enq_bits_uop_ldst_is_rs1; // @[util.scala:458:7] wire [5:0] io_enq_bits_uop_ldst_0 = io_enq_bits_uop_ldst; // @[util.scala:458:7] wire [5:0] io_enq_bits_uop_lrs1_0 = io_enq_bits_uop_lrs1; // @[util.scala:458:7] wire [5:0] io_enq_bits_uop_lrs2_0 = io_enq_bits_uop_lrs2; // @[util.scala:458:7] wire [5:0] io_enq_bits_uop_lrs3_0 = io_enq_bits_uop_lrs3; // @[util.scala:458:7] wire [1:0] io_enq_bits_uop_dst_rtype_0 = io_enq_bits_uop_dst_rtype; // @[util.scala:458:7] wire [1:0] io_enq_bits_uop_lrs1_rtype_0 = io_enq_bits_uop_lrs1_rtype; // @[util.scala:458:7] wire [1:0] io_enq_bits_uop_lrs2_rtype_0 = io_enq_bits_uop_lrs2_rtype; // @[util.scala:458:7] wire io_enq_bits_uop_frs3_en_0 = io_enq_bits_uop_frs3_en; // @[util.scala:458:7] wire io_enq_bits_uop_fcn_dw_0 = io_enq_bits_uop_fcn_dw; // @[util.scala:458:7] wire [4:0] io_enq_bits_uop_fcn_op_0 = io_enq_bits_uop_fcn_op; // @[util.scala:458:7] wire io_enq_bits_uop_fp_val_0 = io_enq_bits_uop_fp_val; // @[util.scala:458:7] wire [2:0] io_enq_bits_uop_fp_rm_0 = io_enq_bits_uop_fp_rm; // @[util.scala:458:7] wire [1:0] io_enq_bits_uop_fp_typ_0 = io_enq_bits_uop_fp_typ; // @[util.scala:458:7] wire io_enq_bits_uop_xcpt_pf_if_0 = io_enq_bits_uop_xcpt_pf_if; // @[util.scala:458:7] wire io_enq_bits_uop_xcpt_ae_if_0 = io_enq_bits_uop_xcpt_ae_if; // @[util.scala:458:7] wire io_enq_bits_uop_xcpt_ma_if_0 = io_enq_bits_uop_xcpt_ma_if; // 
@[util.scala:458:7] wire io_enq_bits_uop_bp_debug_if_0 = io_enq_bits_uop_bp_debug_if; // @[util.scala:458:7] wire io_enq_bits_uop_bp_xcpt_if_0 = io_enq_bits_uop_bp_xcpt_if; // @[util.scala:458:7] wire [2:0] io_enq_bits_uop_debug_fsrc_0 = io_enq_bits_uop_debug_fsrc; // @[util.scala:458:7] wire [2:0] io_enq_bits_uop_debug_tsrc_0 = io_enq_bits_uop_debug_tsrc; // @[util.scala:458:7] wire [33:0] io_enq_bits_addr_0 = io_enq_bits_addr; // @[util.scala:458:7] wire [63:0] io_enq_bits_data_0 = io_enq_bits_data; // @[util.scala:458:7] wire io_enq_bits_is_hella_0 = io_enq_bits_is_hella; // @[util.scala:458:7] wire io_enq_bits_tag_match_0 = io_enq_bits_tag_match; // @[util.scala:458:7] wire [1:0] io_enq_bits_old_meta_coh_state_0 = io_enq_bits_old_meta_coh_state; // @[util.scala:458:7] wire [21:0] io_enq_bits_old_meta_tag_0 = io_enq_bits_old_meta_tag; // @[util.scala:458:7] wire [1:0] io_enq_bits_way_en_0 = io_enq_bits_way_en; // @[util.scala:458:7] wire [4:0] io_enq_bits_sdq_id_0 = io_enq_bits_sdq_id; // @[util.scala:458:7] wire io_deq_ready_0 = io_deq_ready; // @[util.scala:458:7] wire _out_valid_T_3 = 1'h1; // @[util.scala:492:{31,83}, :496:{41,106}] wire _out_valid_T_6 = 1'h1; // @[util.scala:492:{31,83}, :496:{41,106}] wire _out_valid_T_11 = 1'h1; // @[util.scala:492:{31,83}, :496:{41,106}] wire _out_valid_T_14 = 1'h1; // @[util.scala:492:{31,83}, :496:{41,106}] wire [3:0] _out_uop_out_br_mask_T = 4'hF; // @[util.scala:93:27] wire [3:0] _out_uop_out_br_mask_T_2 = 4'hF; // @[util.scala:93:27] wire [20:0] io_brupdate_b2_target_offset = 21'h0; // @[util.scala:458:7, :463:14, :476:22] wire [63:0] io_brupdate_b2_uop_exc_cause = 64'h0; // @[util.scala:458:7, :463:14, :476:22] wire [19:0] io_brupdate_b2_uop_imm_packed = 20'h0; // @[util.scala:458:7, :463:14, :476:22] wire [4:0] io_brupdate_b2_uop_pimm = 5'h0; // @[util.scala:458:7, :463:14, :476:22] wire [4:0] io_brupdate_b2_uop_rob_idx = 5'h0; // @[util.scala:458:7, :463:14, :476:22] wire [4:0] io_brupdate_b2_uop_mem_cmd = 5'h0; // @[util.scala:458:7, :463:14, :476:22] wire [4:0] io_brupdate_b2_uop_fcn_op = 5'h0; // @[util.scala:458:7, :463:14, :476:22] wire [2:0] io_brupdate_b2_uop_imm_sel = 3'h0; // @[util.scala:458:7, :463:14, :476:22] wire [2:0] io_brupdate_b2_uop_op2_sel = 3'h0; // @[util.scala:458:7, :463:14, :476:22] wire [2:0] io_brupdate_b2_uop_csr_cmd = 3'h0; // @[util.scala:458:7, :463:14, :476:22] wire [2:0] io_brupdate_b2_uop_fp_rm = 3'h0; // @[util.scala:458:7, :463:14, :476:22] wire [2:0] io_brupdate_b2_uop_debug_fsrc = 3'h0; // @[util.scala:458:7, :463:14, :476:22] wire [2:0] io_brupdate_b2_uop_debug_tsrc = 3'h0; // @[util.scala:458:7, :463:14, :476:22] wire [2:0] io_brupdate_b2_cfi_type = 3'h0; // @[util.scala:458:7, :463:14, :476:22] wire [5:0] io_brupdate_b2_uop_pc_lob = 6'h0; // @[util.scala:458:7, :463:14, :476:22] wire [5:0] io_brupdate_b2_uop_pdst = 6'h0; // @[util.scala:458:7, :463:14, :476:22] wire [5:0] io_brupdate_b2_uop_prs1 = 6'h0; // @[util.scala:458:7, :463:14, :476:22] wire [5:0] io_brupdate_b2_uop_prs2 = 6'h0; // @[util.scala:458:7, :463:14, :476:22] wire [5:0] io_brupdate_b2_uop_prs3 = 6'h0; // @[util.scala:458:7, :463:14, :476:22] wire [5:0] io_brupdate_b2_uop_stale_pdst = 6'h0; // @[util.scala:458:7, :463:14, :476:22] wire [5:0] io_brupdate_b2_uop_ldst = 6'h0; // @[util.scala:458:7, :463:14, :476:22] wire [5:0] io_brupdate_b2_uop_lrs1 = 6'h0; // @[util.scala:458:7, :463:14, :476:22] wire [5:0] io_brupdate_b2_uop_lrs2 = 6'h0; // @[util.scala:458:7, :463:14, :476:22] wire [5:0] io_brupdate_b2_uop_lrs3 = 6'h0; // 
@[util.scala:458:7, :463:14, :476:22] wire [1:0] io_brupdate_b2_uop_br_tag = 2'h0; // @[util.scala:458:7, :463:14, :476:22] wire [1:0] io_brupdate_b2_uop_op1_sel = 2'h0; // @[util.scala:458:7, :463:14, :476:22] wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn = 2'h0; // @[util.scala:458:7, :463:14, :476:22] wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut = 2'h0; // @[util.scala:458:7, :463:14, :476:22] wire [1:0] io_brupdate_b2_uop_rxq_idx = 2'h0; // @[util.scala:458:7, :463:14, :476:22] wire [1:0] io_brupdate_b2_uop_mem_size = 2'h0; // @[util.scala:458:7, :463:14, :476:22] wire [1:0] io_brupdate_b2_uop_dst_rtype = 2'h0; // @[util.scala:458:7, :463:14, :476:22] wire [1:0] io_brupdate_b2_uop_lrs1_rtype = 2'h0; // @[util.scala:458:7, :463:14, :476:22] wire [1:0] io_brupdate_b2_uop_lrs2_rtype = 2'h0; // @[util.scala:458:7, :463:14, :476:22] wire [1:0] io_brupdate_b2_uop_fp_typ = 2'h0; // @[util.scala:458:7, :463:14, :476:22] wire [1:0] io_brupdate_b2_pc_sel = 2'h0; // @[util.scala:458:7, :463:14, :476:22] wire [33:0] io_brupdate_b2_uop_debug_pc = 34'h0; // @[util.scala:458:7, :463:14, :476:22] wire [33:0] io_brupdate_b2_jalr_target = 34'h0; // @[util.scala:458:7, :463:14, :476:22] wire io_brupdate_b2_uop_is_rvc = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_iq_type_0 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_iq_type_1 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_iq_type_2 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_iq_type_3 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fu_code_0 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fu_code_1 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fu_code_2 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fu_code_3 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fu_code_4 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fu_code_5 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fu_code_6 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fu_code_7 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fu_code_8 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fu_code_9 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_iw_issued = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_iw_issued_partial_agen = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_iw_issued_partial_dgen = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_iw_p1_speculative_child = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_iw_p2_speculative_child = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_iw_p1_bypass_hint = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_iw_p2_bypass_hint = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_iw_p3_bypass_hint = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_dis_col_sel = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_is_sfb = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_is_fence = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_is_fencei = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_is_sfence = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_is_amo = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_is_eret = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_is_sys_pc2epc = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_is_rocc = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_is_mov = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_edge_inst = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_taken = 1'h0; // @[util.scala:458:7] wire 
io_brupdate_b2_uop_imm_rename = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_ldst = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_wen = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_ren1 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_ren2 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_ren3 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_swap12 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_swap23 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_fromint = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_toint = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_fastpipe = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_fma = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_div = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_sqrt = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_wflags = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_ctrl_vec = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_prs1_busy = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_prs2_busy = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_prs3_busy = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_ppred_busy = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_exception = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_mem_signed = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_uses_ldq = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_uses_stq = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_is_unique = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_flush_on_commit = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_ldst_is_rs1 = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_frs3_en = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fcn_dw = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_fp_val = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_xcpt_pf_if = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_xcpt_ae_if = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_xcpt_ma_if = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_bp_debug_if = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_uop_bp_xcpt_if = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_mispredict = 1'h0; // @[util.scala:458:7] wire io_brupdate_b2_taken = 1'h0; // @[util.scala:458:7] wire io_flush = 1'h0; // @[util.scala:458:7] wire _out_valid_T_1 = 1'h0; // @[util.scala:126:59] wire _out_valid_T_2 = 1'h0; // @[util.scala:61:61] wire _out_valid_T_5 = 1'h0; // @[util.scala:492:94] wire _out_valid_T_9 = 1'h0; // @[util.scala:126:59] wire _out_valid_T_10 = 1'h0; // @[util.scala:61:61] wire _out_valid_T_13 = 1'h0; // @[util.scala:496:117] wire [31:0] io_brupdate_b2_uop_inst = 32'h0; // @[util.scala:458:7, :463:14, :476:22] wire [31:0] io_brupdate_b2_uop_debug_inst = 32'h0; // @[util.scala:458:7, :463:14, :476:22] wire [3:0] io_brupdate_b1_resolve_mask = 4'h0; // @[util.scala:126:51, :458:7, :463:14, :476:22] wire [3:0] io_brupdate_b1_mispredict_mask = 4'h0; // @[util.scala:126:51, :458:7, :463:14, :476:22] wire [3:0] io_brupdate_b2_uop_br_mask = 4'h0; // @[util.scala:126:51, :458:7, :463:14, :476:22] wire [3:0] io_brupdate_b2_uop_br_type = 4'h0; // @[util.scala:126:51, :458:7, :463:14, :476:22] wire [3:0] io_brupdate_b2_uop_ftq_idx = 4'h0; // @[util.scala:126:51, :458:7, :463:14, :476:22] wire [3:0] io_brupdate_b2_uop_ldq_idx = 4'h0; // @[util.scala:126:51, :458:7, 
:463:14, :476:22] wire [3:0] io_brupdate_b2_uop_stq_idx = 4'h0; // @[util.scala:126:51, :458:7, :463:14, :476:22] wire [3:0] io_brupdate_b2_uop_ppred = 4'h0; // @[util.scala:126:51, :458:7, :463:14, :476:22] wire [3:0] _out_valid_T = 4'h0; // @[util.scala:126:51, :458:7, :463:14, :476:22] wire [3:0] _out_valid_T_8 = 4'h0; // @[util.scala:126:51, :458:7, :463:14, :476:22] wire _io_empty_T_1; // @[util.scala:484:31] wire [3:0] _io_count_T_1; // @[util.scala:485:31] wire io_enq_ready_0; // @[util.scala:458:7] wire io_deq_bits_uop_iq_type_0_0; // @[util.scala:458:7] wire io_deq_bits_uop_iq_type_1_0; // @[util.scala:458:7] wire io_deq_bits_uop_iq_type_2_0; // @[util.scala:458:7] wire io_deq_bits_uop_iq_type_3_0; // @[util.scala:458:7] wire io_deq_bits_uop_fu_code_0_0; // @[util.scala:458:7] wire io_deq_bits_uop_fu_code_1_0; // @[util.scala:458:7] wire io_deq_bits_uop_fu_code_2_0; // @[util.scala:458:7] wire io_deq_bits_uop_fu_code_3_0; // @[util.scala:458:7] wire io_deq_bits_uop_fu_code_4_0; // @[util.scala:458:7] wire io_deq_bits_uop_fu_code_5_0; // @[util.scala:458:7] wire io_deq_bits_uop_fu_code_6_0; // @[util.scala:458:7] wire io_deq_bits_uop_fu_code_7_0; // @[util.scala:458:7] wire io_deq_bits_uop_fu_code_8_0; // @[util.scala:458:7] wire io_deq_bits_uop_fu_code_9_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7] wire [1:0] io_deq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7] wire [1:0] io_deq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7] wire [31:0] io_deq_bits_uop_inst_0; // @[util.scala:458:7] wire [31:0] io_deq_bits_uop_debug_inst_0; // @[util.scala:458:7] wire io_deq_bits_uop_is_rvc_0; // @[util.scala:458:7] wire [33:0] io_deq_bits_uop_debug_pc_0; // @[util.scala:458:7] wire io_deq_bits_uop_iw_issued_0; // @[util.scala:458:7] wire io_deq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7] wire io_deq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7] wire io_deq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7] wire io_deq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7] wire io_deq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7] wire io_deq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7] wire io_deq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7] wire io_deq_bits_uop_dis_col_sel_0; // @[util.scala:458:7] wire [3:0] io_deq_bits_uop_br_mask_0; // @[util.scala:458:7] wire [1:0] io_deq_bits_uop_br_tag_0; // @[util.scala:458:7] wire [3:0] io_deq_bits_uop_br_type_0; // @[util.scala:458:7] wire io_deq_bits_uop_is_sfb_0; // @[util.scala:458:7] wire io_deq_bits_uop_is_fence_0; // @[util.scala:458:7] wire io_deq_bits_uop_is_fencei_0; // @[util.scala:458:7] wire 
io_deq_bits_uop_is_sfence_0; // @[util.scala:458:7] wire io_deq_bits_uop_is_amo_0; // @[util.scala:458:7] wire io_deq_bits_uop_is_eret_0; // @[util.scala:458:7] wire io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7] wire io_deq_bits_uop_is_rocc_0; // @[util.scala:458:7] wire io_deq_bits_uop_is_mov_0; // @[util.scala:458:7] wire [3:0] io_deq_bits_uop_ftq_idx_0; // @[util.scala:458:7] wire io_deq_bits_uop_edge_inst_0; // @[util.scala:458:7] wire [5:0] io_deq_bits_uop_pc_lob_0; // @[util.scala:458:7] wire io_deq_bits_uop_taken_0; // @[util.scala:458:7] wire io_deq_bits_uop_imm_rename_0; // @[util.scala:458:7] wire [2:0] io_deq_bits_uop_imm_sel_0; // @[util.scala:458:7] wire [4:0] io_deq_bits_uop_pimm_0; // @[util.scala:458:7] wire [19:0] io_deq_bits_uop_imm_packed_0; // @[util.scala:458:7] wire [1:0] io_deq_bits_uop_op1_sel_0; // @[util.scala:458:7] wire [2:0] io_deq_bits_uop_op2_sel_0; // @[util.scala:458:7] wire [4:0] io_deq_bits_uop_rob_idx_0; // @[util.scala:458:7] wire [3:0] io_deq_bits_uop_ldq_idx_0; // @[util.scala:458:7] wire [3:0] io_deq_bits_uop_stq_idx_0; // @[util.scala:458:7] wire [1:0] io_deq_bits_uop_rxq_idx_0; // @[util.scala:458:7] wire [5:0] io_deq_bits_uop_pdst_0; // @[util.scala:458:7] wire [5:0] io_deq_bits_uop_prs1_0; // @[util.scala:458:7] wire [5:0] io_deq_bits_uop_prs2_0; // @[util.scala:458:7] wire [5:0] io_deq_bits_uop_prs3_0; // @[util.scala:458:7] wire [3:0] io_deq_bits_uop_ppred_0; // @[util.scala:458:7] wire io_deq_bits_uop_prs1_busy_0; // @[util.scala:458:7] wire io_deq_bits_uop_prs2_busy_0; // @[util.scala:458:7] wire io_deq_bits_uop_prs3_busy_0; // @[util.scala:458:7] wire io_deq_bits_uop_ppred_busy_0; // @[util.scala:458:7] wire [5:0] io_deq_bits_uop_stale_pdst_0; // @[util.scala:458:7] wire io_deq_bits_uop_exception_0; // @[util.scala:458:7] wire [63:0] io_deq_bits_uop_exc_cause_0; // @[util.scala:458:7] wire [4:0] io_deq_bits_uop_mem_cmd_0; // @[util.scala:458:7] wire [1:0] io_deq_bits_uop_mem_size_0; // @[util.scala:458:7] wire io_deq_bits_uop_mem_signed_0; // @[util.scala:458:7] wire io_deq_bits_uop_uses_ldq_0; // @[util.scala:458:7] wire io_deq_bits_uop_uses_stq_0; // @[util.scala:458:7] wire io_deq_bits_uop_is_unique_0; // @[util.scala:458:7] wire io_deq_bits_uop_flush_on_commit_0; // @[util.scala:458:7] wire [2:0] io_deq_bits_uop_csr_cmd_0; // @[util.scala:458:7] wire io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7] wire [5:0] io_deq_bits_uop_ldst_0; // @[util.scala:458:7] wire [5:0] io_deq_bits_uop_lrs1_0; // @[util.scala:458:7] wire [5:0] io_deq_bits_uop_lrs2_0; // @[util.scala:458:7] wire [5:0] io_deq_bits_uop_lrs3_0; // @[util.scala:458:7] wire [1:0] io_deq_bits_uop_dst_rtype_0; // @[util.scala:458:7] wire [1:0] io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7] wire [1:0] io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7] wire io_deq_bits_uop_frs3_en_0; // @[util.scala:458:7] wire io_deq_bits_uop_fcn_dw_0; // @[util.scala:458:7] wire [4:0] io_deq_bits_uop_fcn_op_0; // @[util.scala:458:7] wire io_deq_bits_uop_fp_val_0; // @[util.scala:458:7] wire [2:0] io_deq_bits_uop_fp_rm_0; // @[util.scala:458:7] wire [1:0] io_deq_bits_uop_fp_typ_0; // @[util.scala:458:7] wire io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7] wire io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7] wire io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7] wire io_deq_bits_uop_bp_debug_if_0; // @[util.scala:458:7] wire io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7] wire [2:0] io_deq_bits_uop_debug_fsrc_0; // @[util.scala:458:7] wire [2:0] 
io_deq_bits_uop_debug_tsrc_0; // @[util.scala:458:7] wire [1:0] io_deq_bits_old_meta_coh_state_0; // @[util.scala:458:7] wire [21:0] io_deq_bits_old_meta_tag_0; // @[util.scala:458:7] wire [33:0] io_deq_bits_addr_0; // @[util.scala:458:7] wire [63:0] io_deq_bits_data_0; // @[util.scala:458:7] wire io_deq_bits_is_hella_0; // @[util.scala:458:7] wire io_deq_bits_tag_match_0; // @[util.scala:458:7] wire [1:0] io_deq_bits_way_en_0; // @[util.scala:458:7] wire [4:0] io_deq_bits_sdq_id_0; // @[util.scala:458:7] wire io_deq_valid_0; // @[util.scala:458:7] wire io_empty_0; // @[util.scala:458:7] wire [3:0] io_count; // @[util.scala:458:7] reg [31:0] out_reg_uop_inst; // @[util.scala:477:22] reg [31:0] out_reg_uop_debug_inst; // @[util.scala:477:22] reg out_reg_uop_is_rvc; // @[util.scala:477:22] reg [33:0] out_reg_uop_debug_pc; // @[util.scala:477:22] reg out_reg_uop_iq_type_0; // @[util.scala:477:22] reg out_reg_uop_iq_type_1; // @[util.scala:477:22] reg out_reg_uop_iq_type_2; // @[util.scala:477:22] reg out_reg_uop_iq_type_3; // @[util.scala:477:22] reg out_reg_uop_fu_code_0; // @[util.scala:477:22] reg out_reg_uop_fu_code_1; // @[util.scala:477:22] reg out_reg_uop_fu_code_2; // @[util.scala:477:22] reg out_reg_uop_fu_code_3; // @[util.scala:477:22] reg out_reg_uop_fu_code_4; // @[util.scala:477:22] reg out_reg_uop_fu_code_5; // @[util.scala:477:22] reg out_reg_uop_fu_code_6; // @[util.scala:477:22] reg out_reg_uop_fu_code_7; // @[util.scala:477:22] reg out_reg_uop_fu_code_8; // @[util.scala:477:22] reg out_reg_uop_fu_code_9; // @[util.scala:477:22] reg out_reg_uop_iw_issued; // @[util.scala:477:22] reg out_reg_uop_iw_issued_partial_agen; // @[util.scala:477:22] reg out_reg_uop_iw_issued_partial_dgen; // @[util.scala:477:22] reg out_reg_uop_iw_p1_speculative_child; // @[util.scala:477:22] reg out_reg_uop_iw_p2_speculative_child; // @[util.scala:477:22] reg out_reg_uop_iw_p1_bypass_hint; // @[util.scala:477:22] reg out_reg_uop_iw_p2_bypass_hint; // @[util.scala:477:22] reg out_reg_uop_iw_p3_bypass_hint; // @[util.scala:477:22] reg out_reg_uop_dis_col_sel; // @[util.scala:477:22] reg [3:0] out_reg_uop_br_mask; // @[util.scala:477:22] reg [1:0] out_reg_uop_br_tag; // @[util.scala:477:22] reg [3:0] out_reg_uop_br_type; // @[util.scala:477:22] reg out_reg_uop_is_sfb; // @[util.scala:477:22] reg out_reg_uop_is_fence; // @[util.scala:477:22] reg out_reg_uop_is_fencei; // @[util.scala:477:22] reg out_reg_uop_is_sfence; // @[util.scala:477:22] reg out_reg_uop_is_amo; // @[util.scala:477:22] reg out_reg_uop_is_eret; // @[util.scala:477:22] reg out_reg_uop_is_sys_pc2epc; // @[util.scala:477:22] reg out_reg_uop_is_rocc; // @[util.scala:477:22] reg out_reg_uop_is_mov; // @[util.scala:477:22] reg [3:0] out_reg_uop_ftq_idx; // @[util.scala:477:22] reg out_reg_uop_edge_inst; // @[util.scala:477:22] reg [5:0] out_reg_uop_pc_lob; // @[util.scala:477:22] reg out_reg_uop_taken; // @[util.scala:477:22] reg out_reg_uop_imm_rename; // @[util.scala:477:22] reg [2:0] out_reg_uop_imm_sel; // @[util.scala:477:22] reg [4:0] out_reg_uop_pimm; // @[util.scala:477:22] reg [19:0] out_reg_uop_imm_packed; // @[util.scala:477:22] reg [1:0] out_reg_uop_op1_sel; // @[util.scala:477:22] reg [2:0] out_reg_uop_op2_sel; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_ldst; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_wen; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_ren1; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_ren2; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_ren3; // @[util.scala:477:22] reg 
out_reg_uop_fp_ctrl_swap12; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_swap23; // @[util.scala:477:22] reg [1:0] out_reg_uop_fp_ctrl_typeTagIn; // @[util.scala:477:22] reg [1:0] out_reg_uop_fp_ctrl_typeTagOut; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_fromint; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_toint; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_fastpipe; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_fma; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_div; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_sqrt; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_wflags; // @[util.scala:477:22] reg out_reg_uop_fp_ctrl_vec; // @[util.scala:477:22] reg [4:0] out_reg_uop_rob_idx; // @[util.scala:477:22] reg [3:0] out_reg_uop_ldq_idx; // @[util.scala:477:22] reg [3:0] out_reg_uop_stq_idx; // @[util.scala:477:22] reg [1:0] out_reg_uop_rxq_idx; // @[util.scala:477:22] reg [5:0] out_reg_uop_pdst; // @[util.scala:477:22] reg [5:0] out_reg_uop_prs1; // @[util.scala:477:22] reg [5:0] out_reg_uop_prs2; // @[util.scala:477:22] reg [5:0] out_reg_uop_prs3; // @[util.scala:477:22] reg [3:0] out_reg_uop_ppred; // @[util.scala:477:22] reg out_reg_uop_prs1_busy; // @[util.scala:477:22] reg out_reg_uop_prs2_busy; // @[util.scala:477:22] reg out_reg_uop_prs3_busy; // @[util.scala:477:22] reg out_reg_uop_ppred_busy; // @[util.scala:477:22] reg [5:0] out_reg_uop_stale_pdst; // @[util.scala:477:22] reg out_reg_uop_exception; // @[util.scala:477:22] reg [63:0] out_reg_uop_exc_cause; // @[util.scala:477:22] reg [4:0] out_reg_uop_mem_cmd; // @[util.scala:477:22] reg [1:0] out_reg_uop_mem_size; // @[util.scala:477:22] reg out_reg_uop_mem_signed; // @[util.scala:477:22] reg out_reg_uop_uses_ldq; // @[util.scala:477:22] reg out_reg_uop_uses_stq; // @[util.scala:477:22] reg out_reg_uop_is_unique; // @[util.scala:477:22] reg out_reg_uop_flush_on_commit; // @[util.scala:477:22] reg [2:0] out_reg_uop_csr_cmd; // @[util.scala:477:22] reg out_reg_uop_ldst_is_rs1; // @[util.scala:477:22] reg [5:0] out_reg_uop_ldst; // @[util.scala:477:22] reg [5:0] out_reg_uop_lrs1; // @[util.scala:477:22] reg [5:0] out_reg_uop_lrs2; // @[util.scala:477:22] reg [5:0] out_reg_uop_lrs3; // @[util.scala:477:22] reg [1:0] out_reg_uop_dst_rtype; // @[util.scala:477:22] reg [1:0] out_reg_uop_lrs1_rtype; // @[util.scala:477:22] reg [1:0] out_reg_uop_lrs2_rtype; // @[util.scala:477:22] reg out_reg_uop_frs3_en; // @[util.scala:477:22] reg out_reg_uop_fcn_dw; // @[util.scala:477:22] reg [4:0] out_reg_uop_fcn_op; // @[util.scala:477:22] reg out_reg_uop_fp_val; // @[util.scala:477:22] reg [2:0] out_reg_uop_fp_rm; // @[util.scala:477:22] reg [1:0] out_reg_uop_fp_typ; // @[util.scala:477:22] reg out_reg_uop_xcpt_pf_if; // @[util.scala:477:22] reg out_reg_uop_xcpt_ae_if; // @[util.scala:477:22] reg out_reg_uop_xcpt_ma_if; // @[util.scala:477:22] reg out_reg_uop_bp_debug_if; // @[util.scala:477:22] reg out_reg_uop_bp_xcpt_if; // @[util.scala:477:22] reg [2:0] out_reg_uop_debug_fsrc; // @[util.scala:477:22] reg [2:0] out_reg_uop_debug_tsrc; // @[util.scala:477:22] reg [33:0] out_reg_addr; // @[util.scala:477:22] assign io_deq_bits_addr_0 = out_reg_addr; // @[util.scala:458:7, :477:22] reg [63:0] out_reg_data; // @[util.scala:477:22] assign io_deq_bits_data_0 = out_reg_data; // @[util.scala:458:7, :477:22] reg out_reg_is_hella; // @[util.scala:477:22] assign io_deq_bits_is_hella_0 = out_reg_is_hella; // @[util.scala:458:7, :477:22] reg out_reg_tag_match; // @[util.scala:477:22] assign io_deq_bits_tag_match_0 = out_reg_tag_match; // 
@[util.scala:458:7, :477:22] reg [1:0] out_reg_old_meta_coh_state; // @[util.scala:477:22] assign io_deq_bits_old_meta_coh_state_0 = out_reg_old_meta_coh_state; // @[util.scala:458:7, :477:22] reg [21:0] out_reg_old_meta_tag; // @[util.scala:477:22] assign io_deq_bits_old_meta_tag_0 = out_reg_old_meta_tag; // @[util.scala:458:7, :477:22] reg [1:0] out_reg_way_en; // @[util.scala:477:22] assign io_deq_bits_way_en_0 = out_reg_way_en; // @[util.scala:458:7, :477:22] reg [4:0] out_reg_sdq_id; // @[util.scala:477:22] assign io_deq_bits_sdq_id_0 = out_reg_sdq_id; // @[util.scala:458:7, :477:22] reg out_valid; // @[util.scala:478:28] assign io_deq_valid_0 = out_valid; // @[util.scala:458:7, :478:28] wire _out_valid_T_4 = out_valid; // @[util.scala:478:28, :492:28] reg [31:0] out_uop_inst; // @[util.scala:479:22] assign io_deq_bits_uop_inst_0 = out_uop_inst; // @[util.scala:458:7, :479:22] wire [31:0] out_uop_out_inst = out_uop_inst; // @[util.scala:104:23, :479:22] reg [31:0] out_uop_debug_inst; // @[util.scala:479:22] assign io_deq_bits_uop_debug_inst_0 = out_uop_debug_inst; // @[util.scala:458:7, :479:22] wire [31:0] out_uop_out_debug_inst = out_uop_debug_inst; // @[util.scala:104:23, :479:22] reg out_uop_is_rvc; // @[util.scala:479:22] assign io_deq_bits_uop_is_rvc_0 = out_uop_is_rvc; // @[util.scala:458:7, :479:22] wire out_uop_out_is_rvc = out_uop_is_rvc; // @[util.scala:104:23, :479:22] reg [33:0] out_uop_debug_pc; // @[util.scala:479:22] assign io_deq_bits_uop_debug_pc_0 = out_uop_debug_pc; // @[util.scala:458:7, :479:22] wire [33:0] out_uop_out_debug_pc = out_uop_debug_pc; // @[util.scala:104:23, :479:22] reg out_uop_iq_type_0; // @[util.scala:479:22] assign io_deq_bits_uop_iq_type_0_0 = out_uop_iq_type_0; // @[util.scala:458:7, :479:22] wire out_uop_out_iq_type_0 = out_uop_iq_type_0; // @[util.scala:104:23, :479:22] reg out_uop_iq_type_1; // @[util.scala:479:22] assign io_deq_bits_uop_iq_type_1_0 = out_uop_iq_type_1; // @[util.scala:458:7, :479:22] wire out_uop_out_iq_type_1 = out_uop_iq_type_1; // @[util.scala:104:23, :479:22] reg out_uop_iq_type_2; // @[util.scala:479:22] assign io_deq_bits_uop_iq_type_2_0 = out_uop_iq_type_2; // @[util.scala:458:7, :479:22] wire out_uop_out_iq_type_2 = out_uop_iq_type_2; // @[util.scala:104:23, :479:22] reg out_uop_iq_type_3; // @[util.scala:479:22] assign io_deq_bits_uop_iq_type_3_0 = out_uop_iq_type_3; // @[util.scala:458:7, :479:22] wire out_uop_out_iq_type_3 = out_uop_iq_type_3; // @[util.scala:104:23, :479:22] reg out_uop_fu_code_0; // @[util.scala:479:22] assign io_deq_bits_uop_fu_code_0_0 = out_uop_fu_code_0; // @[util.scala:458:7, :479:22] wire out_uop_out_fu_code_0 = out_uop_fu_code_0; // @[util.scala:104:23, :479:22] reg out_uop_fu_code_1; // @[util.scala:479:22] assign io_deq_bits_uop_fu_code_1_0 = out_uop_fu_code_1; // @[util.scala:458:7, :479:22] wire out_uop_out_fu_code_1 = out_uop_fu_code_1; // @[util.scala:104:23, :479:22] reg out_uop_fu_code_2; // @[util.scala:479:22] assign io_deq_bits_uop_fu_code_2_0 = out_uop_fu_code_2; // @[util.scala:458:7, :479:22] wire out_uop_out_fu_code_2 = out_uop_fu_code_2; // @[util.scala:104:23, :479:22] reg out_uop_fu_code_3; // @[util.scala:479:22] assign io_deq_bits_uop_fu_code_3_0 = out_uop_fu_code_3; // @[util.scala:458:7, :479:22] wire out_uop_out_fu_code_3 = out_uop_fu_code_3; // @[util.scala:104:23, :479:22] reg out_uop_fu_code_4; // @[util.scala:479:22] assign io_deq_bits_uop_fu_code_4_0 = out_uop_fu_code_4; // @[util.scala:458:7, :479:22] wire out_uop_out_fu_code_4 = out_uop_fu_code_4; // 
@[util.scala:104:23, :479:22] reg out_uop_fu_code_5; // @[util.scala:479:22] assign io_deq_bits_uop_fu_code_5_0 = out_uop_fu_code_5; // @[util.scala:458:7, :479:22] wire out_uop_out_fu_code_5 = out_uop_fu_code_5; // @[util.scala:104:23, :479:22] reg out_uop_fu_code_6; // @[util.scala:479:22] assign io_deq_bits_uop_fu_code_6_0 = out_uop_fu_code_6; // @[util.scala:458:7, :479:22] wire out_uop_out_fu_code_6 = out_uop_fu_code_6; // @[util.scala:104:23, :479:22] reg out_uop_fu_code_7; // @[util.scala:479:22] assign io_deq_bits_uop_fu_code_7_0 = out_uop_fu_code_7; // @[util.scala:458:7, :479:22] wire out_uop_out_fu_code_7 = out_uop_fu_code_7; // @[util.scala:104:23, :479:22] reg out_uop_fu_code_8; // @[util.scala:479:22] assign io_deq_bits_uop_fu_code_8_0 = out_uop_fu_code_8; // @[util.scala:458:7, :479:22] wire out_uop_out_fu_code_8 = out_uop_fu_code_8; // @[util.scala:104:23, :479:22] reg out_uop_fu_code_9; // @[util.scala:479:22] assign io_deq_bits_uop_fu_code_9_0 = out_uop_fu_code_9; // @[util.scala:458:7, :479:22] wire out_uop_out_fu_code_9 = out_uop_fu_code_9; // @[util.scala:104:23, :479:22] reg out_uop_iw_issued; // @[util.scala:479:22] assign io_deq_bits_uop_iw_issued_0 = out_uop_iw_issued; // @[util.scala:458:7, :479:22] wire out_uop_out_iw_issued = out_uop_iw_issued; // @[util.scala:104:23, :479:22] reg out_uop_iw_issued_partial_agen; // @[util.scala:479:22] assign io_deq_bits_uop_iw_issued_partial_agen_0 = out_uop_iw_issued_partial_agen; // @[util.scala:458:7, :479:22] wire out_uop_out_iw_issued_partial_agen = out_uop_iw_issued_partial_agen; // @[util.scala:104:23, :479:22] reg out_uop_iw_issued_partial_dgen; // @[util.scala:479:22] assign io_deq_bits_uop_iw_issued_partial_dgen_0 = out_uop_iw_issued_partial_dgen; // @[util.scala:458:7, :479:22] wire out_uop_out_iw_issued_partial_dgen = out_uop_iw_issued_partial_dgen; // @[util.scala:104:23, :479:22] reg out_uop_iw_p1_speculative_child; // @[util.scala:479:22] assign io_deq_bits_uop_iw_p1_speculative_child_0 = out_uop_iw_p1_speculative_child; // @[util.scala:458:7, :479:22] wire out_uop_out_iw_p1_speculative_child = out_uop_iw_p1_speculative_child; // @[util.scala:104:23, :479:22] reg out_uop_iw_p2_speculative_child; // @[util.scala:479:22] assign io_deq_bits_uop_iw_p2_speculative_child_0 = out_uop_iw_p2_speculative_child; // @[util.scala:458:7, :479:22] wire out_uop_out_iw_p2_speculative_child = out_uop_iw_p2_speculative_child; // @[util.scala:104:23, :479:22] reg out_uop_iw_p1_bypass_hint; // @[util.scala:479:22] assign io_deq_bits_uop_iw_p1_bypass_hint_0 = out_uop_iw_p1_bypass_hint; // @[util.scala:458:7, :479:22] wire out_uop_out_iw_p1_bypass_hint = out_uop_iw_p1_bypass_hint; // @[util.scala:104:23, :479:22] reg out_uop_iw_p2_bypass_hint; // @[util.scala:479:22] assign io_deq_bits_uop_iw_p2_bypass_hint_0 = out_uop_iw_p2_bypass_hint; // @[util.scala:458:7, :479:22] wire out_uop_out_iw_p2_bypass_hint = out_uop_iw_p2_bypass_hint; // @[util.scala:104:23, :479:22] reg out_uop_iw_p3_bypass_hint; // @[util.scala:479:22] assign io_deq_bits_uop_iw_p3_bypass_hint_0 = out_uop_iw_p3_bypass_hint; // @[util.scala:458:7, :479:22] wire out_uop_out_iw_p3_bypass_hint = out_uop_iw_p3_bypass_hint; // @[util.scala:104:23, :479:22] reg out_uop_dis_col_sel; // @[util.scala:479:22] assign io_deq_bits_uop_dis_col_sel_0 = out_uop_dis_col_sel; // @[util.scala:458:7, :479:22] wire out_uop_out_dis_col_sel = out_uop_dis_col_sel; // @[util.scala:104:23, :479:22] reg [3:0] out_uop_br_mask; // @[util.scala:479:22] assign io_deq_bits_uop_br_mask_0 = 
out_uop_br_mask; // @[util.scala:458:7, :479:22] wire [3:0] _out_uop_out_br_mask_T_1 = out_uop_br_mask; // @[util.scala:93:25, :479:22] reg [1:0] out_uop_br_tag; // @[util.scala:479:22] assign io_deq_bits_uop_br_tag_0 = out_uop_br_tag; // @[util.scala:458:7, :479:22] wire [1:0] out_uop_out_br_tag = out_uop_br_tag; // @[util.scala:104:23, :479:22] reg [3:0] out_uop_br_type; // @[util.scala:479:22] assign io_deq_bits_uop_br_type_0 = out_uop_br_type; // @[util.scala:458:7, :479:22] wire [3:0] out_uop_out_br_type = out_uop_br_type; // @[util.scala:104:23, :479:22] reg out_uop_is_sfb; // @[util.scala:479:22] assign io_deq_bits_uop_is_sfb_0 = out_uop_is_sfb; // @[util.scala:458:7, :479:22] wire out_uop_out_is_sfb = out_uop_is_sfb; // @[util.scala:104:23, :479:22] reg out_uop_is_fence; // @[util.scala:479:22] assign io_deq_bits_uop_is_fence_0 = out_uop_is_fence; // @[util.scala:458:7, :479:22] wire out_uop_out_is_fence = out_uop_is_fence; // @[util.scala:104:23, :479:22] reg out_uop_is_fencei; // @[util.scala:479:22] assign io_deq_bits_uop_is_fencei_0 = out_uop_is_fencei; // @[util.scala:458:7, :479:22] wire out_uop_out_is_fencei = out_uop_is_fencei; // @[util.scala:104:23, :479:22] reg out_uop_is_sfence; // @[util.scala:479:22] assign io_deq_bits_uop_is_sfence_0 = out_uop_is_sfence; // @[util.scala:458:7, :479:22] wire out_uop_out_is_sfence = out_uop_is_sfence; // @[util.scala:104:23, :479:22] reg out_uop_is_amo; // @[util.scala:479:22] assign io_deq_bits_uop_is_amo_0 = out_uop_is_amo; // @[util.scala:458:7, :479:22] wire out_uop_out_is_amo = out_uop_is_amo; // @[util.scala:104:23, :479:22] reg out_uop_is_eret; // @[util.scala:479:22] assign io_deq_bits_uop_is_eret_0 = out_uop_is_eret; // @[util.scala:458:7, :479:22] wire out_uop_out_is_eret = out_uop_is_eret; // @[util.scala:104:23, :479:22] reg out_uop_is_sys_pc2epc; // @[util.scala:479:22] assign io_deq_bits_uop_is_sys_pc2epc_0 = out_uop_is_sys_pc2epc; // @[util.scala:458:7, :479:22] wire out_uop_out_is_sys_pc2epc = out_uop_is_sys_pc2epc; // @[util.scala:104:23, :479:22] reg out_uop_is_rocc; // @[util.scala:479:22] assign io_deq_bits_uop_is_rocc_0 = out_uop_is_rocc; // @[util.scala:458:7, :479:22] wire out_uop_out_is_rocc = out_uop_is_rocc; // @[util.scala:104:23, :479:22] reg out_uop_is_mov; // @[util.scala:479:22] assign io_deq_bits_uop_is_mov_0 = out_uop_is_mov; // @[util.scala:458:7, :479:22] wire out_uop_out_is_mov = out_uop_is_mov; // @[util.scala:104:23, :479:22] reg [3:0] out_uop_ftq_idx; // @[util.scala:479:22] assign io_deq_bits_uop_ftq_idx_0 = out_uop_ftq_idx; // @[util.scala:458:7, :479:22] wire [3:0] out_uop_out_ftq_idx = out_uop_ftq_idx; // @[util.scala:104:23, :479:22] reg out_uop_edge_inst; // @[util.scala:479:22] assign io_deq_bits_uop_edge_inst_0 = out_uop_edge_inst; // @[util.scala:458:7, :479:22] wire out_uop_out_edge_inst = out_uop_edge_inst; // @[util.scala:104:23, :479:22] reg [5:0] out_uop_pc_lob; // @[util.scala:479:22] assign io_deq_bits_uop_pc_lob_0 = out_uop_pc_lob; // @[util.scala:458:7, :479:22] wire [5:0] out_uop_out_pc_lob = out_uop_pc_lob; // @[util.scala:104:23, :479:22] reg out_uop_taken; // @[util.scala:479:22] assign io_deq_bits_uop_taken_0 = out_uop_taken; // @[util.scala:458:7, :479:22] wire out_uop_out_taken = out_uop_taken; // @[util.scala:104:23, :479:22] reg out_uop_imm_rename; // @[util.scala:479:22] assign io_deq_bits_uop_imm_rename_0 = out_uop_imm_rename; // @[util.scala:458:7, :479:22] wire out_uop_out_imm_rename = out_uop_imm_rename; // @[util.scala:104:23, :479:22] reg [2:0] out_uop_imm_sel; // 
@[util.scala:479:22] assign io_deq_bits_uop_imm_sel_0 = out_uop_imm_sel; // @[util.scala:458:7, :479:22] wire [2:0] out_uop_out_imm_sel = out_uop_imm_sel; // @[util.scala:104:23, :479:22] reg [4:0] out_uop_pimm; // @[util.scala:479:22] assign io_deq_bits_uop_pimm_0 = out_uop_pimm; // @[util.scala:458:7, :479:22] wire [4:0] out_uop_out_pimm = out_uop_pimm; // @[util.scala:104:23, :479:22] reg [19:0] out_uop_imm_packed; // @[util.scala:479:22] assign io_deq_bits_uop_imm_packed_0 = out_uop_imm_packed; // @[util.scala:458:7, :479:22] wire [19:0] out_uop_out_imm_packed = out_uop_imm_packed; // @[util.scala:104:23, :479:22] reg [1:0] out_uop_op1_sel; // @[util.scala:479:22] assign io_deq_bits_uop_op1_sel_0 = out_uop_op1_sel; // @[util.scala:458:7, :479:22] wire [1:0] out_uop_out_op1_sel = out_uop_op1_sel; // @[util.scala:104:23, :479:22] reg [2:0] out_uop_op2_sel; // @[util.scala:479:22] assign io_deq_bits_uop_op2_sel_0 = out_uop_op2_sel; // @[util.scala:458:7, :479:22] wire [2:0] out_uop_out_op2_sel = out_uop_op2_sel; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_ldst; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_ldst_0 = out_uop_fp_ctrl_ldst; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_ldst = out_uop_fp_ctrl_ldst; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_wen; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_wen_0 = out_uop_fp_ctrl_wen; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_wen = out_uop_fp_ctrl_wen; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_ren1; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_ren1_0 = out_uop_fp_ctrl_ren1; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_ren1 = out_uop_fp_ctrl_ren1; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_ren2; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_ren2_0 = out_uop_fp_ctrl_ren2; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_ren2 = out_uop_fp_ctrl_ren2; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_ren3; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_ren3_0 = out_uop_fp_ctrl_ren3; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_ren3 = out_uop_fp_ctrl_ren3; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_swap12; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_swap12_0 = out_uop_fp_ctrl_swap12; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_swap12 = out_uop_fp_ctrl_swap12; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_swap23; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_swap23_0 = out_uop_fp_ctrl_swap23; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_swap23 = out_uop_fp_ctrl_swap23; // @[util.scala:104:23, :479:22] reg [1:0] out_uop_fp_ctrl_typeTagIn; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_typeTagIn_0 = out_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7, :479:22] wire [1:0] out_uop_out_fp_ctrl_typeTagIn = out_uop_fp_ctrl_typeTagIn; // @[util.scala:104:23, :479:22] reg [1:0] out_uop_fp_ctrl_typeTagOut; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_typeTagOut_0 = out_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7, :479:22] wire [1:0] out_uop_out_fp_ctrl_typeTagOut = out_uop_fp_ctrl_typeTagOut; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_fromint; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_fromint_0 = out_uop_fp_ctrl_fromint; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_fromint = out_uop_fp_ctrl_fromint; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_toint; // @[util.scala:479:22] assign 
io_deq_bits_uop_fp_ctrl_toint_0 = out_uop_fp_ctrl_toint; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_toint = out_uop_fp_ctrl_toint; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_fastpipe; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_fastpipe_0 = out_uop_fp_ctrl_fastpipe; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_fastpipe = out_uop_fp_ctrl_fastpipe; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_fma; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_fma_0 = out_uop_fp_ctrl_fma; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_fma = out_uop_fp_ctrl_fma; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_div; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_div_0 = out_uop_fp_ctrl_div; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_div = out_uop_fp_ctrl_div; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_sqrt; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_sqrt_0 = out_uop_fp_ctrl_sqrt; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_sqrt = out_uop_fp_ctrl_sqrt; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_wflags; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_wflags_0 = out_uop_fp_ctrl_wflags; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_wflags = out_uop_fp_ctrl_wflags; // @[util.scala:104:23, :479:22] reg out_uop_fp_ctrl_vec; // @[util.scala:479:22] assign io_deq_bits_uop_fp_ctrl_vec_0 = out_uop_fp_ctrl_vec; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_ctrl_vec = out_uop_fp_ctrl_vec; // @[util.scala:104:23, :479:22] reg [4:0] out_uop_rob_idx; // @[util.scala:479:22] assign io_deq_bits_uop_rob_idx_0 = out_uop_rob_idx; // @[util.scala:458:7, :479:22] wire [4:0] out_uop_out_rob_idx = out_uop_rob_idx; // @[util.scala:104:23, :479:22] reg [3:0] out_uop_ldq_idx; // @[util.scala:479:22] assign io_deq_bits_uop_ldq_idx_0 = out_uop_ldq_idx; // @[util.scala:458:7, :479:22] wire [3:0] out_uop_out_ldq_idx = out_uop_ldq_idx; // @[util.scala:104:23, :479:22] reg [3:0] out_uop_stq_idx; // @[util.scala:479:22] assign io_deq_bits_uop_stq_idx_0 = out_uop_stq_idx; // @[util.scala:458:7, :479:22] wire [3:0] out_uop_out_stq_idx = out_uop_stq_idx; // @[util.scala:104:23, :479:22] reg [1:0] out_uop_rxq_idx; // @[util.scala:479:22] assign io_deq_bits_uop_rxq_idx_0 = out_uop_rxq_idx; // @[util.scala:458:7, :479:22] wire [1:0] out_uop_out_rxq_idx = out_uop_rxq_idx; // @[util.scala:104:23, :479:22] reg [5:0] out_uop_pdst; // @[util.scala:479:22] assign io_deq_bits_uop_pdst_0 = out_uop_pdst; // @[util.scala:458:7, :479:22] wire [5:0] out_uop_out_pdst = out_uop_pdst; // @[util.scala:104:23, :479:22] reg [5:0] out_uop_prs1; // @[util.scala:479:22] assign io_deq_bits_uop_prs1_0 = out_uop_prs1; // @[util.scala:458:7, :479:22] wire [5:0] out_uop_out_prs1 = out_uop_prs1; // @[util.scala:104:23, :479:22] reg [5:0] out_uop_prs2; // @[util.scala:479:22] assign io_deq_bits_uop_prs2_0 = out_uop_prs2; // @[util.scala:458:7, :479:22] wire [5:0] out_uop_out_prs2 = out_uop_prs2; // @[util.scala:104:23, :479:22] reg [5:0] out_uop_prs3; // @[util.scala:479:22] assign io_deq_bits_uop_prs3_0 = out_uop_prs3; // @[util.scala:458:7, :479:22] wire [5:0] out_uop_out_prs3 = out_uop_prs3; // @[util.scala:104:23, :479:22] reg [3:0] out_uop_ppred; // @[util.scala:479:22] assign io_deq_bits_uop_ppred_0 = out_uop_ppred; // @[util.scala:458:7, :479:22] wire [3:0] out_uop_out_ppred = out_uop_ppred; // @[util.scala:104:23, :479:22] reg out_uop_prs1_busy; // @[util.scala:479:22] assign io_deq_bits_uop_prs1_busy_0 = 
out_uop_prs1_busy; // @[util.scala:458:7, :479:22] wire out_uop_out_prs1_busy = out_uop_prs1_busy; // @[util.scala:104:23, :479:22] reg out_uop_prs2_busy; // @[util.scala:479:22] assign io_deq_bits_uop_prs2_busy_0 = out_uop_prs2_busy; // @[util.scala:458:7, :479:22] wire out_uop_out_prs2_busy = out_uop_prs2_busy; // @[util.scala:104:23, :479:22] reg out_uop_prs3_busy; // @[util.scala:479:22] assign io_deq_bits_uop_prs3_busy_0 = out_uop_prs3_busy; // @[util.scala:458:7, :479:22] wire out_uop_out_prs3_busy = out_uop_prs3_busy; // @[util.scala:104:23, :479:22] reg out_uop_ppred_busy; // @[util.scala:479:22] assign io_deq_bits_uop_ppred_busy_0 = out_uop_ppred_busy; // @[util.scala:458:7, :479:22] wire out_uop_out_ppred_busy = out_uop_ppred_busy; // @[util.scala:104:23, :479:22] reg [5:0] out_uop_stale_pdst; // @[util.scala:479:22] assign io_deq_bits_uop_stale_pdst_0 = out_uop_stale_pdst; // @[util.scala:458:7, :479:22] wire [5:0] out_uop_out_stale_pdst = out_uop_stale_pdst; // @[util.scala:104:23, :479:22] reg out_uop_exception; // @[util.scala:479:22] assign io_deq_bits_uop_exception_0 = out_uop_exception; // @[util.scala:458:7, :479:22] wire out_uop_out_exception = out_uop_exception; // @[util.scala:104:23, :479:22] reg [63:0] out_uop_exc_cause; // @[util.scala:479:22] assign io_deq_bits_uop_exc_cause_0 = out_uop_exc_cause; // @[util.scala:458:7, :479:22] wire [63:0] out_uop_out_exc_cause = out_uop_exc_cause; // @[util.scala:104:23, :479:22] reg [4:0] out_uop_mem_cmd; // @[util.scala:479:22] assign io_deq_bits_uop_mem_cmd_0 = out_uop_mem_cmd; // @[util.scala:458:7, :479:22] wire [4:0] out_uop_out_mem_cmd = out_uop_mem_cmd; // @[util.scala:104:23, :479:22] reg [1:0] out_uop_mem_size; // @[util.scala:479:22] assign io_deq_bits_uop_mem_size_0 = out_uop_mem_size; // @[util.scala:458:7, :479:22] wire [1:0] out_uop_out_mem_size = out_uop_mem_size; // @[util.scala:104:23, :479:22] reg out_uop_mem_signed; // @[util.scala:479:22] assign io_deq_bits_uop_mem_signed_0 = out_uop_mem_signed; // @[util.scala:458:7, :479:22] wire out_uop_out_mem_signed = out_uop_mem_signed; // @[util.scala:104:23, :479:22] reg out_uop_uses_ldq; // @[util.scala:479:22] assign io_deq_bits_uop_uses_ldq_0 = out_uop_uses_ldq; // @[util.scala:458:7, :479:22] wire out_uop_out_uses_ldq = out_uop_uses_ldq; // @[util.scala:104:23, :479:22] reg out_uop_uses_stq; // @[util.scala:479:22] assign io_deq_bits_uop_uses_stq_0 = out_uop_uses_stq; // @[util.scala:458:7, :479:22] wire out_uop_out_uses_stq = out_uop_uses_stq; // @[util.scala:104:23, :479:22] reg out_uop_is_unique; // @[util.scala:479:22] assign io_deq_bits_uop_is_unique_0 = out_uop_is_unique; // @[util.scala:458:7, :479:22] wire out_uop_out_is_unique = out_uop_is_unique; // @[util.scala:104:23, :479:22] reg out_uop_flush_on_commit; // @[util.scala:479:22] assign io_deq_bits_uop_flush_on_commit_0 = out_uop_flush_on_commit; // @[util.scala:458:7, :479:22] wire out_uop_out_flush_on_commit = out_uop_flush_on_commit; // @[util.scala:104:23, :479:22] reg [2:0] out_uop_csr_cmd; // @[util.scala:479:22] assign io_deq_bits_uop_csr_cmd_0 = out_uop_csr_cmd; // @[util.scala:458:7, :479:22] wire [2:0] out_uop_out_csr_cmd = out_uop_csr_cmd; // @[util.scala:104:23, :479:22] reg out_uop_ldst_is_rs1; // @[util.scala:479:22] assign io_deq_bits_uop_ldst_is_rs1_0 = out_uop_ldst_is_rs1; // @[util.scala:458:7, :479:22] wire out_uop_out_ldst_is_rs1 = out_uop_ldst_is_rs1; // @[util.scala:104:23, :479:22] reg [5:0] out_uop_ldst; // @[util.scala:479:22] assign io_deq_bits_uop_ldst_0 = out_uop_ldst; // 
@[util.scala:458:7, :479:22] wire [5:0] out_uop_out_ldst = out_uop_ldst; // @[util.scala:104:23, :479:22] reg [5:0] out_uop_lrs1; // @[util.scala:479:22] assign io_deq_bits_uop_lrs1_0 = out_uop_lrs1; // @[util.scala:458:7, :479:22] wire [5:0] out_uop_out_lrs1 = out_uop_lrs1; // @[util.scala:104:23, :479:22] reg [5:0] out_uop_lrs2; // @[util.scala:479:22] assign io_deq_bits_uop_lrs2_0 = out_uop_lrs2; // @[util.scala:458:7, :479:22] wire [5:0] out_uop_out_lrs2 = out_uop_lrs2; // @[util.scala:104:23, :479:22] reg [5:0] out_uop_lrs3; // @[util.scala:479:22] assign io_deq_bits_uop_lrs3_0 = out_uop_lrs3; // @[util.scala:458:7, :479:22] wire [5:0] out_uop_out_lrs3 = out_uop_lrs3; // @[util.scala:104:23, :479:22] reg [1:0] out_uop_dst_rtype; // @[util.scala:479:22] assign io_deq_bits_uop_dst_rtype_0 = out_uop_dst_rtype; // @[util.scala:458:7, :479:22] wire [1:0] out_uop_out_dst_rtype = out_uop_dst_rtype; // @[util.scala:104:23, :479:22] reg [1:0] out_uop_lrs1_rtype; // @[util.scala:479:22] assign io_deq_bits_uop_lrs1_rtype_0 = out_uop_lrs1_rtype; // @[util.scala:458:7, :479:22] wire [1:0] out_uop_out_lrs1_rtype = out_uop_lrs1_rtype; // @[util.scala:104:23, :479:22] reg [1:0] out_uop_lrs2_rtype; // @[util.scala:479:22] assign io_deq_bits_uop_lrs2_rtype_0 = out_uop_lrs2_rtype; // @[util.scala:458:7, :479:22] wire [1:0] out_uop_out_lrs2_rtype = out_uop_lrs2_rtype; // @[util.scala:104:23, :479:22] reg out_uop_frs3_en; // @[util.scala:479:22] assign io_deq_bits_uop_frs3_en_0 = out_uop_frs3_en; // @[util.scala:458:7, :479:22] wire out_uop_out_frs3_en = out_uop_frs3_en; // @[util.scala:104:23, :479:22] reg out_uop_fcn_dw; // @[util.scala:479:22] assign io_deq_bits_uop_fcn_dw_0 = out_uop_fcn_dw; // @[util.scala:458:7, :479:22] wire out_uop_out_fcn_dw = out_uop_fcn_dw; // @[util.scala:104:23, :479:22] reg [4:0] out_uop_fcn_op; // @[util.scala:479:22] assign io_deq_bits_uop_fcn_op_0 = out_uop_fcn_op; // @[util.scala:458:7, :479:22] wire [4:0] out_uop_out_fcn_op = out_uop_fcn_op; // @[util.scala:104:23, :479:22] reg out_uop_fp_val; // @[util.scala:479:22] assign io_deq_bits_uop_fp_val_0 = out_uop_fp_val; // @[util.scala:458:7, :479:22] wire out_uop_out_fp_val = out_uop_fp_val; // @[util.scala:104:23, :479:22] reg [2:0] out_uop_fp_rm; // @[util.scala:479:22] assign io_deq_bits_uop_fp_rm_0 = out_uop_fp_rm; // @[util.scala:458:7, :479:22] wire [2:0] out_uop_out_fp_rm = out_uop_fp_rm; // @[util.scala:104:23, :479:22] reg [1:0] out_uop_fp_typ; // @[util.scala:479:22] assign io_deq_bits_uop_fp_typ_0 = out_uop_fp_typ; // @[util.scala:458:7, :479:22] wire [1:0] out_uop_out_fp_typ = out_uop_fp_typ; // @[util.scala:104:23, :479:22] reg out_uop_xcpt_pf_if; // @[util.scala:479:22] assign io_deq_bits_uop_xcpt_pf_if_0 = out_uop_xcpt_pf_if; // @[util.scala:458:7, :479:22] wire out_uop_out_xcpt_pf_if = out_uop_xcpt_pf_if; // @[util.scala:104:23, :479:22] reg out_uop_xcpt_ae_if; // @[util.scala:479:22] assign io_deq_bits_uop_xcpt_ae_if_0 = out_uop_xcpt_ae_if; // @[util.scala:458:7, :479:22] wire out_uop_out_xcpt_ae_if = out_uop_xcpt_ae_if; // @[util.scala:104:23, :479:22] reg out_uop_xcpt_ma_if; // @[util.scala:479:22] assign io_deq_bits_uop_xcpt_ma_if_0 = out_uop_xcpt_ma_if; // @[util.scala:458:7, :479:22] wire out_uop_out_xcpt_ma_if = out_uop_xcpt_ma_if; // @[util.scala:104:23, :479:22] reg out_uop_bp_debug_if; // @[util.scala:479:22] assign io_deq_bits_uop_bp_debug_if_0 = out_uop_bp_debug_if; // @[util.scala:458:7, :479:22] wire out_uop_out_bp_debug_if = out_uop_bp_debug_if; // @[util.scala:104:23, :479:22] reg 
out_uop_bp_xcpt_if; // @[util.scala:479:22] assign io_deq_bits_uop_bp_xcpt_if_0 = out_uop_bp_xcpt_if; // @[util.scala:458:7, :479:22] wire out_uop_out_bp_xcpt_if = out_uop_bp_xcpt_if; // @[util.scala:104:23, :479:22] reg [2:0] out_uop_debug_fsrc; // @[util.scala:479:22] assign io_deq_bits_uop_debug_fsrc_0 = out_uop_debug_fsrc; // @[util.scala:458:7, :479:22] wire [2:0] out_uop_out_debug_fsrc = out_uop_debug_fsrc; // @[util.scala:104:23, :479:22] reg [2:0] out_uop_debug_tsrc; // @[util.scala:479:22] assign io_deq_bits_uop_debug_tsrc_0 = out_uop_debug_tsrc; // @[util.scala:458:7, :479:22] wire [2:0] out_uop_out_debug_tsrc = out_uop_debug_tsrc; // @[util.scala:104:23, :479:22] wire _io_empty_T = ~out_valid; // @[util.scala:478:28, :484:34] assign _io_empty_T_1 = _main_io_empty & _io_empty_T; // @[util.scala:476:22, :484:{31,34}] assign io_empty_0 = _io_empty_T_1; // @[util.scala:458:7, :484:31] wire [4:0] _io_count_T = {1'h0, _main_io_count} + {4'h0, out_valid}; // @[util.scala:126:51, :458:7, :463:14, :476:22, :478:28, :485:31] assign _io_count_T_1 = _io_count_T[3:0]; // @[util.scala:485:31] assign io_count = _io_count_T_1; // @[util.scala:458:7, :485:31] wire [3:0] out_uop_out_br_mask; // @[util.scala:104:23] assign out_uop_out_br_mask = _out_uop_out_br_mask_T_1; // @[util.scala:93:25, :104:23] wire _out_valid_T_7 = _out_valid_T_4; // @[util.scala:492:{28,80}] wire main_io_deq_ready = io_deq_ready_0 & io_deq_valid_0 | ~out_valid; // @[Decoupled.scala:51:35] wire _out_valid_T_15 = _out_valid_T_12; // @[util.scala:496:{38,103}] wire [3:0] _out_uop_out_br_mask_T_3; // @[util.scala:93:25] wire out_uop_out_1_iq_type_0; // @[util.scala:104:23] wire out_uop_out_1_iq_type_1; // @[util.scala:104:23] wire out_uop_out_1_iq_type_2; // @[util.scala:104:23] wire out_uop_out_1_iq_type_3; // @[util.scala:104:23] wire out_uop_out_1_fu_code_0; // @[util.scala:104:23] wire out_uop_out_1_fu_code_1; // @[util.scala:104:23] wire out_uop_out_1_fu_code_2; // @[util.scala:104:23] wire out_uop_out_1_fu_code_3; // @[util.scala:104:23] wire out_uop_out_1_fu_code_4; // @[util.scala:104:23] wire out_uop_out_1_fu_code_5; // @[util.scala:104:23] wire out_uop_out_1_fu_code_6; // @[util.scala:104:23] wire out_uop_out_1_fu_code_7; // @[util.scala:104:23] wire out_uop_out_1_fu_code_8; // @[util.scala:104:23] wire out_uop_out_1_fu_code_9; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_ldst; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_wen; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_ren1; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_ren2; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_ren3; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_swap12; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_swap23; // @[util.scala:104:23] wire [1:0] out_uop_out_1_fp_ctrl_typeTagIn; // @[util.scala:104:23] wire [1:0] out_uop_out_1_fp_ctrl_typeTagOut; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_fromint; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_toint; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_fastpipe; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_fma; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_div; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_sqrt; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_wflags; // @[util.scala:104:23] wire out_uop_out_1_fp_ctrl_vec; // @[util.scala:104:23] wire [31:0] out_uop_out_1_inst; // @[util.scala:104:23] wire [31:0] out_uop_out_1_debug_inst; // @[util.scala:104:23] wire out_uop_out_1_is_rvc; // @[util.scala:104:23] wire [33:0] 
out_uop_out_1_debug_pc; // @[util.scala:104:23] wire out_uop_out_1_iw_issued; // @[util.scala:104:23] wire out_uop_out_1_iw_issued_partial_agen; // @[util.scala:104:23] wire out_uop_out_1_iw_issued_partial_dgen; // @[util.scala:104:23] wire out_uop_out_1_iw_p1_speculative_child; // @[util.scala:104:23] wire out_uop_out_1_iw_p2_speculative_child; // @[util.scala:104:23] wire out_uop_out_1_iw_p1_bypass_hint; // @[util.scala:104:23] wire out_uop_out_1_iw_p2_bypass_hint; // @[util.scala:104:23] wire out_uop_out_1_iw_p3_bypass_hint; // @[util.scala:104:23] wire out_uop_out_1_dis_col_sel; // @[util.scala:104:23] wire [3:0] out_uop_out_1_br_mask; // @[util.scala:104:23] wire [1:0] out_uop_out_1_br_tag; // @[util.scala:104:23] wire [3:0] out_uop_out_1_br_type; // @[util.scala:104:23] wire out_uop_out_1_is_sfb; // @[util.scala:104:23] wire out_uop_out_1_is_fence; // @[util.scala:104:23] wire out_uop_out_1_is_fencei; // @[util.scala:104:23] wire out_uop_out_1_is_sfence; // @[util.scala:104:23] wire out_uop_out_1_is_amo; // @[util.scala:104:23] wire out_uop_out_1_is_eret; // @[util.scala:104:23] wire out_uop_out_1_is_sys_pc2epc; // @[util.scala:104:23] wire out_uop_out_1_is_rocc; // @[util.scala:104:23] wire out_uop_out_1_is_mov; // @[util.scala:104:23] wire [3:0] out_uop_out_1_ftq_idx; // @[util.scala:104:23] wire out_uop_out_1_edge_inst; // @[util.scala:104:23] wire [5:0] out_uop_out_1_pc_lob; // @[util.scala:104:23] wire out_uop_out_1_taken; // @[util.scala:104:23] wire out_uop_out_1_imm_rename; // @[util.scala:104:23] wire [2:0] out_uop_out_1_imm_sel; // @[util.scala:104:23] wire [4:0] out_uop_out_1_pimm; // @[util.scala:104:23] wire [19:0] out_uop_out_1_imm_packed; // @[util.scala:104:23] wire [1:0] out_uop_out_1_op1_sel; // @[util.scala:104:23] wire [2:0] out_uop_out_1_op2_sel; // @[util.scala:104:23] wire [4:0] out_uop_out_1_rob_idx; // @[util.scala:104:23] wire [3:0] out_uop_out_1_ldq_idx; // @[util.scala:104:23] wire [3:0] out_uop_out_1_stq_idx; // @[util.scala:104:23] wire [1:0] out_uop_out_1_rxq_idx; // @[util.scala:104:23] wire [5:0] out_uop_out_1_pdst; // @[util.scala:104:23] wire [5:0] out_uop_out_1_prs1; // @[util.scala:104:23] wire [5:0] out_uop_out_1_prs2; // @[util.scala:104:23] wire [5:0] out_uop_out_1_prs3; // @[util.scala:104:23] wire [3:0] out_uop_out_1_ppred; // @[util.scala:104:23] wire out_uop_out_1_prs1_busy; // @[util.scala:104:23] wire out_uop_out_1_prs2_busy; // @[util.scala:104:23] wire out_uop_out_1_prs3_busy; // @[util.scala:104:23] wire out_uop_out_1_ppred_busy; // @[util.scala:104:23] wire [5:0] out_uop_out_1_stale_pdst; // @[util.scala:104:23] wire out_uop_out_1_exception; // @[util.scala:104:23] wire [63:0] out_uop_out_1_exc_cause; // @[util.scala:104:23] wire [4:0] out_uop_out_1_mem_cmd; // @[util.scala:104:23] wire [1:0] out_uop_out_1_mem_size; // @[util.scala:104:23] wire out_uop_out_1_mem_signed; // @[util.scala:104:23] wire out_uop_out_1_uses_ldq; // @[util.scala:104:23] wire out_uop_out_1_uses_stq; // @[util.scala:104:23] wire out_uop_out_1_is_unique; // @[util.scala:104:23] wire out_uop_out_1_flush_on_commit; // @[util.scala:104:23] wire [2:0] out_uop_out_1_csr_cmd; // @[util.scala:104:23] wire out_uop_out_1_ldst_is_rs1; // @[util.scala:104:23] wire [5:0] out_uop_out_1_ldst; // @[util.scala:104:23] wire [5:0] out_uop_out_1_lrs1; // @[util.scala:104:23] wire [5:0] out_uop_out_1_lrs2; // @[util.scala:104:23] wire [5:0] out_uop_out_1_lrs3; // @[util.scala:104:23] wire [1:0] out_uop_out_1_dst_rtype; // @[util.scala:104:23] wire [1:0] out_uop_out_1_lrs1_rtype; // 
@[util.scala:104:23] wire [1:0] out_uop_out_1_lrs2_rtype; // @[util.scala:104:23] wire out_uop_out_1_frs3_en; // @[util.scala:104:23] wire out_uop_out_1_fcn_dw; // @[util.scala:104:23] wire [4:0] out_uop_out_1_fcn_op; // @[util.scala:104:23] wire out_uop_out_1_fp_val; // @[util.scala:104:23] wire [2:0] out_uop_out_1_fp_rm; // @[util.scala:104:23] wire [1:0] out_uop_out_1_fp_typ; // @[util.scala:104:23] wire out_uop_out_1_xcpt_pf_if; // @[util.scala:104:23] wire out_uop_out_1_xcpt_ae_if; // @[util.scala:104:23] wire out_uop_out_1_xcpt_ma_if; // @[util.scala:104:23] wire out_uop_out_1_bp_debug_if; // @[util.scala:104:23] wire out_uop_out_1_bp_xcpt_if; // @[util.scala:104:23] wire [2:0] out_uop_out_1_debug_fsrc; // @[util.scala:104:23] wire [2:0] out_uop_out_1_debug_tsrc; // @[util.scala:104:23] assign out_uop_out_1_br_mask = _out_uop_out_br_mask_T_3; // @[util.scala:93:25, :104:23] always @(posedge clock) begin // @[util.scala:458:7] if (main_io_deq_ready) begin // @[util.scala:495:23] out_reg_uop_inst <= _main_io_deq_bits_uop_inst; // @[util.scala:476:22, :477:22] out_reg_uop_debug_inst <= _main_io_deq_bits_uop_debug_inst; // @[util.scala:476:22, :477:22] out_reg_uop_is_rvc <= _main_io_deq_bits_uop_is_rvc; // @[util.scala:476:22, :477:22] out_reg_uop_debug_pc <= _main_io_deq_bits_uop_debug_pc; // @[util.scala:476:22, :477:22] out_reg_uop_iq_type_0 <= _main_io_deq_bits_uop_iq_type_0; // @[util.scala:476:22, :477:22] out_reg_uop_iq_type_1 <= _main_io_deq_bits_uop_iq_type_1; // @[util.scala:476:22, :477:22] out_reg_uop_iq_type_2 <= _main_io_deq_bits_uop_iq_type_2; // @[util.scala:476:22, :477:22] out_reg_uop_iq_type_3 <= _main_io_deq_bits_uop_iq_type_3; // @[util.scala:476:22, :477:22] out_reg_uop_fu_code_0 <= _main_io_deq_bits_uop_fu_code_0; // @[util.scala:476:22, :477:22] out_reg_uop_fu_code_1 <= _main_io_deq_bits_uop_fu_code_1; // @[util.scala:476:22, :477:22] out_reg_uop_fu_code_2 <= _main_io_deq_bits_uop_fu_code_2; // @[util.scala:476:22, :477:22] out_reg_uop_fu_code_3 <= _main_io_deq_bits_uop_fu_code_3; // @[util.scala:476:22, :477:22] out_reg_uop_fu_code_4 <= _main_io_deq_bits_uop_fu_code_4; // @[util.scala:476:22, :477:22] out_reg_uop_fu_code_5 <= _main_io_deq_bits_uop_fu_code_5; // @[util.scala:476:22, :477:22] out_reg_uop_fu_code_6 <= _main_io_deq_bits_uop_fu_code_6; // @[util.scala:476:22, :477:22] out_reg_uop_fu_code_7 <= _main_io_deq_bits_uop_fu_code_7; // @[util.scala:476:22, :477:22] out_reg_uop_fu_code_8 <= _main_io_deq_bits_uop_fu_code_8; // @[util.scala:476:22, :477:22] out_reg_uop_fu_code_9 <= _main_io_deq_bits_uop_fu_code_9; // @[util.scala:476:22, :477:22] out_reg_uop_iw_issued <= _main_io_deq_bits_uop_iw_issued; // @[util.scala:476:22, :477:22] out_reg_uop_iw_issued_partial_agen <= _main_io_deq_bits_uop_iw_issued_partial_agen; // @[util.scala:476:22, :477:22] out_reg_uop_iw_issued_partial_dgen <= _main_io_deq_bits_uop_iw_issued_partial_dgen; // @[util.scala:476:22, :477:22] out_reg_uop_iw_p1_speculative_child <= _main_io_deq_bits_uop_iw_p1_speculative_child; // @[util.scala:476:22, :477:22] out_reg_uop_iw_p2_speculative_child <= _main_io_deq_bits_uop_iw_p2_speculative_child; // @[util.scala:476:22, :477:22] out_reg_uop_iw_p1_bypass_hint <= _main_io_deq_bits_uop_iw_p1_bypass_hint; // @[util.scala:476:22, :477:22] out_reg_uop_iw_p2_bypass_hint <= _main_io_deq_bits_uop_iw_p2_bypass_hint; // @[util.scala:476:22, :477:22] out_reg_uop_iw_p3_bypass_hint <= _main_io_deq_bits_uop_iw_p3_bypass_hint; // @[util.scala:476:22, :477:22] out_reg_uop_dis_col_sel <= 
_main_io_deq_bits_uop_dis_col_sel; // @[util.scala:476:22, :477:22] out_reg_uop_br_mask <= _main_io_deq_bits_uop_br_mask; // @[util.scala:476:22, :477:22] out_reg_uop_br_tag <= _main_io_deq_bits_uop_br_tag; // @[util.scala:476:22, :477:22] out_reg_uop_br_type <= _main_io_deq_bits_uop_br_type; // @[util.scala:476:22, :477:22] out_reg_uop_is_sfb <= _main_io_deq_bits_uop_is_sfb; // @[util.scala:476:22, :477:22] out_reg_uop_is_fence <= _main_io_deq_bits_uop_is_fence; // @[util.scala:476:22, :477:22] out_reg_uop_is_fencei <= _main_io_deq_bits_uop_is_fencei; // @[util.scala:476:22, :477:22] out_reg_uop_is_sfence <= _main_io_deq_bits_uop_is_sfence; // @[util.scala:476:22, :477:22] out_reg_uop_is_amo <= _main_io_deq_bits_uop_is_amo; // @[util.scala:476:22, :477:22] out_reg_uop_is_eret <= _main_io_deq_bits_uop_is_eret; // @[util.scala:476:22, :477:22] out_reg_uop_is_sys_pc2epc <= _main_io_deq_bits_uop_is_sys_pc2epc; // @[util.scala:476:22, :477:22] out_reg_uop_is_rocc <= _main_io_deq_bits_uop_is_rocc; // @[util.scala:476:22, :477:22] out_reg_uop_is_mov <= _main_io_deq_bits_uop_is_mov; // @[util.scala:476:22, :477:22] out_reg_uop_ftq_idx <= _main_io_deq_bits_uop_ftq_idx; // @[util.scala:476:22, :477:22] out_reg_uop_edge_inst <= _main_io_deq_bits_uop_edge_inst; // @[util.scala:476:22, :477:22] out_reg_uop_pc_lob <= _main_io_deq_bits_uop_pc_lob; // @[util.scala:476:22, :477:22] out_reg_uop_taken <= _main_io_deq_bits_uop_taken; // @[util.scala:476:22, :477:22] out_reg_uop_imm_rename <= _main_io_deq_bits_uop_imm_rename; // @[util.scala:476:22, :477:22] out_reg_uop_imm_sel <= _main_io_deq_bits_uop_imm_sel; // @[util.scala:476:22, :477:22] out_reg_uop_pimm <= _main_io_deq_bits_uop_pimm; // @[util.scala:476:22, :477:22] out_reg_uop_imm_packed <= _main_io_deq_bits_uop_imm_packed; // @[util.scala:476:22, :477:22] out_reg_uop_op1_sel <= _main_io_deq_bits_uop_op1_sel; // @[util.scala:476:22, :477:22] out_reg_uop_op2_sel <= _main_io_deq_bits_uop_op2_sel; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_ldst <= _main_io_deq_bits_uop_fp_ctrl_ldst; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_wen <= _main_io_deq_bits_uop_fp_ctrl_wen; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_ren1 <= _main_io_deq_bits_uop_fp_ctrl_ren1; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_ren2 <= _main_io_deq_bits_uop_fp_ctrl_ren2; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_ren3 <= _main_io_deq_bits_uop_fp_ctrl_ren3; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_swap12 <= _main_io_deq_bits_uop_fp_ctrl_swap12; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_swap23 <= _main_io_deq_bits_uop_fp_ctrl_swap23; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_typeTagIn <= _main_io_deq_bits_uop_fp_ctrl_typeTagIn; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_typeTagOut <= _main_io_deq_bits_uop_fp_ctrl_typeTagOut; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_fromint <= _main_io_deq_bits_uop_fp_ctrl_fromint; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_toint <= _main_io_deq_bits_uop_fp_ctrl_toint; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_fastpipe <= _main_io_deq_bits_uop_fp_ctrl_fastpipe; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_fma <= _main_io_deq_bits_uop_fp_ctrl_fma; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_div <= _main_io_deq_bits_uop_fp_ctrl_div; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_sqrt <= _main_io_deq_bits_uop_fp_ctrl_sqrt; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_wflags <= 
_main_io_deq_bits_uop_fp_ctrl_wflags; // @[util.scala:476:22, :477:22] out_reg_uop_fp_ctrl_vec <= _main_io_deq_bits_uop_fp_ctrl_vec; // @[util.scala:476:22, :477:22] out_reg_uop_rob_idx <= _main_io_deq_bits_uop_rob_idx; // @[util.scala:476:22, :477:22] out_reg_uop_ldq_idx <= _main_io_deq_bits_uop_ldq_idx; // @[util.scala:476:22, :477:22] out_reg_uop_stq_idx <= _main_io_deq_bits_uop_stq_idx; // @[util.scala:476:22, :477:22] out_reg_uop_rxq_idx <= _main_io_deq_bits_uop_rxq_idx; // @[util.scala:476:22, :477:22] out_reg_uop_pdst <= _main_io_deq_bits_uop_pdst; // @[util.scala:476:22, :477:22] out_reg_uop_prs1 <= _main_io_deq_bits_uop_prs1; // @[util.scala:476:22, :477:22] out_reg_uop_prs2 <= _main_io_deq_bits_uop_prs2; // @[util.scala:476:22, :477:22] out_reg_uop_prs3 <= _main_io_deq_bits_uop_prs3; // @[util.scala:476:22, :477:22] out_reg_uop_ppred <= _main_io_deq_bits_uop_ppred; // @[util.scala:476:22, :477:22] out_reg_uop_prs1_busy <= _main_io_deq_bits_uop_prs1_busy; // @[util.scala:476:22, :477:22] out_reg_uop_prs2_busy <= _main_io_deq_bits_uop_prs2_busy; // @[util.scala:476:22, :477:22] out_reg_uop_prs3_busy <= _main_io_deq_bits_uop_prs3_busy; // @[util.scala:476:22, :477:22] out_reg_uop_ppred_busy <= _main_io_deq_bits_uop_ppred_busy; // @[util.scala:476:22, :477:22] out_reg_uop_stale_pdst <= _main_io_deq_bits_uop_stale_pdst; // @[util.scala:476:22, :477:22] out_reg_uop_exception <= _main_io_deq_bits_uop_exception; // @[util.scala:476:22, :477:22] out_reg_uop_exc_cause <= _main_io_deq_bits_uop_exc_cause; // @[util.scala:476:22, :477:22] out_reg_uop_mem_cmd <= _main_io_deq_bits_uop_mem_cmd; // @[util.scala:476:22, :477:22] out_reg_uop_mem_size <= _main_io_deq_bits_uop_mem_size; // @[util.scala:476:22, :477:22] out_reg_uop_mem_signed <= _main_io_deq_bits_uop_mem_signed; // @[util.scala:476:22, :477:22] out_reg_uop_uses_ldq <= _main_io_deq_bits_uop_uses_ldq; // @[util.scala:476:22, :477:22] out_reg_uop_uses_stq <= _main_io_deq_bits_uop_uses_stq; // @[util.scala:476:22, :477:22] out_reg_uop_is_unique <= _main_io_deq_bits_uop_is_unique; // @[util.scala:476:22, :477:22] out_reg_uop_flush_on_commit <= _main_io_deq_bits_uop_flush_on_commit; // @[util.scala:476:22, :477:22] out_reg_uop_csr_cmd <= _main_io_deq_bits_uop_csr_cmd; // @[util.scala:476:22, :477:22] out_reg_uop_ldst_is_rs1 <= _main_io_deq_bits_uop_ldst_is_rs1; // @[util.scala:476:22, :477:22] out_reg_uop_ldst <= _main_io_deq_bits_uop_ldst; // @[util.scala:476:22, :477:22] out_reg_uop_lrs1 <= _main_io_deq_bits_uop_lrs1; // @[util.scala:476:22, :477:22] out_reg_uop_lrs2 <= _main_io_deq_bits_uop_lrs2; // @[util.scala:476:22, :477:22] out_reg_uop_lrs3 <= _main_io_deq_bits_uop_lrs3; // @[util.scala:476:22, :477:22] out_reg_uop_dst_rtype <= _main_io_deq_bits_uop_dst_rtype; // @[util.scala:476:22, :477:22] out_reg_uop_lrs1_rtype <= _main_io_deq_bits_uop_lrs1_rtype; // @[util.scala:476:22, :477:22] out_reg_uop_lrs2_rtype <= _main_io_deq_bits_uop_lrs2_rtype; // @[util.scala:476:22, :477:22] out_reg_uop_frs3_en <= _main_io_deq_bits_uop_frs3_en; // @[util.scala:476:22, :477:22] out_reg_uop_fcn_dw <= _main_io_deq_bits_uop_fcn_dw; // @[util.scala:476:22, :477:22] out_reg_uop_fcn_op <= _main_io_deq_bits_uop_fcn_op; // @[util.scala:476:22, :477:22] out_reg_uop_fp_val <= _main_io_deq_bits_uop_fp_val; // @[util.scala:476:22, :477:22] out_reg_uop_fp_rm <= _main_io_deq_bits_uop_fp_rm; // @[util.scala:476:22, :477:22] out_reg_uop_fp_typ <= _main_io_deq_bits_uop_fp_typ; // @[util.scala:476:22, :477:22] out_reg_uop_xcpt_pf_if <= 
_main_io_deq_bits_uop_xcpt_pf_if; // @[util.scala:476:22, :477:22] out_reg_uop_xcpt_ae_if <= _main_io_deq_bits_uop_xcpt_ae_if; // @[util.scala:476:22, :477:22] out_reg_uop_xcpt_ma_if <= _main_io_deq_bits_uop_xcpt_ma_if; // @[util.scala:476:22, :477:22] out_reg_uop_bp_debug_if <= _main_io_deq_bits_uop_bp_debug_if; // @[util.scala:476:22, :477:22] out_reg_uop_bp_xcpt_if <= _main_io_deq_bits_uop_bp_xcpt_if; // @[util.scala:476:22, :477:22] out_reg_uop_debug_fsrc <= _main_io_deq_bits_uop_debug_fsrc; // @[util.scala:476:22, :477:22] out_reg_uop_debug_tsrc <= _main_io_deq_bits_uop_debug_tsrc; // @[util.scala:476:22, :477:22] out_reg_addr <= _main_io_deq_bits_addr; // @[util.scala:476:22, :477:22] out_reg_data <= _main_io_deq_bits_data; // @[util.scala:476:22, :477:22] out_reg_is_hella <= _main_io_deq_bits_is_hella; // @[util.scala:476:22, :477:22] out_reg_tag_match <= _main_io_deq_bits_tag_match; // @[util.scala:476:22, :477:22] out_reg_old_meta_coh_state <= _main_io_deq_bits_old_meta_coh_state; // @[util.scala:476:22, :477:22] out_reg_old_meta_tag <= _main_io_deq_bits_old_meta_tag; // @[util.scala:476:22, :477:22] out_reg_way_en <= _main_io_deq_bits_way_en; // @[util.scala:476:22, :477:22] out_reg_sdq_id <= _main_io_deq_bits_sdq_id; // @[util.scala:476:22, :477:22] end out_uop_inst <= main_io_deq_ready ? out_uop_out_1_inst : out_uop_out_inst; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_debug_inst <= main_io_deq_ready ? out_uop_out_1_debug_inst : out_uop_out_debug_inst; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_is_rvc <= main_io_deq_ready ? out_uop_out_1_is_rvc : out_uop_out_is_rvc; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_debug_pc <= main_io_deq_ready ? out_uop_out_1_debug_pc : out_uop_out_debug_pc; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_iq_type_0 <= main_io_deq_ready ? out_uop_out_1_iq_type_0 : out_uop_out_iq_type_0; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_iq_type_1 <= main_io_deq_ready ? out_uop_out_1_iq_type_1 : out_uop_out_iq_type_1; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_iq_type_2 <= main_io_deq_ready ? out_uop_out_1_iq_type_2 : out_uop_out_iq_type_2; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_iq_type_3 <= main_io_deq_ready ? out_uop_out_1_iq_type_3 : out_uop_out_iq_type_3; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fu_code_0 <= main_io_deq_ready ? out_uop_out_1_fu_code_0 : out_uop_out_fu_code_0; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fu_code_1 <= main_io_deq_ready ? out_uop_out_1_fu_code_1 : out_uop_out_fu_code_1; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fu_code_2 <= main_io_deq_ready ? out_uop_out_1_fu_code_2 : out_uop_out_fu_code_2; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fu_code_3 <= main_io_deq_ready ? out_uop_out_1_fu_code_3 : out_uop_out_fu_code_3; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fu_code_4 <= main_io_deq_ready ? out_uop_out_1_fu_code_4 : out_uop_out_fu_code_4; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fu_code_5 <= main_io_deq_ready ? out_uop_out_1_fu_code_5 : out_uop_out_fu_code_5; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fu_code_6 <= main_io_deq_ready ? 
out_uop_out_1_fu_code_6 : out_uop_out_fu_code_6; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fu_code_7 <= main_io_deq_ready ? out_uop_out_1_fu_code_7 : out_uop_out_fu_code_7; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fu_code_8 <= main_io_deq_ready ? out_uop_out_1_fu_code_8 : out_uop_out_fu_code_8; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fu_code_9 <= main_io_deq_ready ? out_uop_out_1_fu_code_9 : out_uop_out_fu_code_9; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_iw_issued <= main_io_deq_ready ? out_uop_out_1_iw_issued : out_uop_out_iw_issued; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_iw_issued_partial_agen <= main_io_deq_ready ? out_uop_out_1_iw_issued_partial_agen : out_uop_out_iw_issued_partial_agen; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_iw_issued_partial_dgen <= main_io_deq_ready ? out_uop_out_1_iw_issued_partial_dgen : out_uop_out_iw_issued_partial_dgen; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_iw_p1_speculative_child <= main_io_deq_ready ? out_uop_out_1_iw_p1_speculative_child : out_uop_out_iw_p1_speculative_child; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_iw_p2_speculative_child <= main_io_deq_ready ? out_uop_out_1_iw_p2_speculative_child : out_uop_out_iw_p2_speculative_child; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_iw_p1_bypass_hint <= main_io_deq_ready ? out_uop_out_1_iw_p1_bypass_hint : out_uop_out_iw_p1_bypass_hint; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_iw_p2_bypass_hint <= main_io_deq_ready ? out_uop_out_1_iw_p2_bypass_hint : out_uop_out_iw_p2_bypass_hint; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_iw_p3_bypass_hint <= main_io_deq_ready ? out_uop_out_1_iw_p3_bypass_hint : out_uop_out_iw_p3_bypass_hint; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_dis_col_sel <= main_io_deq_ready ? out_uop_out_1_dis_col_sel : out_uop_out_dis_col_sel; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_br_mask <= main_io_deq_ready ? out_uop_out_1_br_mask : out_uop_out_br_mask; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_br_tag <= main_io_deq_ready ? out_uop_out_1_br_tag : out_uop_out_br_tag; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_br_type <= main_io_deq_ready ? out_uop_out_1_br_type : out_uop_out_br_type; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_is_sfb <= main_io_deq_ready ? out_uop_out_1_is_sfb : out_uop_out_is_sfb; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_is_fence <= main_io_deq_ready ? out_uop_out_1_is_fence : out_uop_out_is_fence; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_is_fencei <= main_io_deq_ready ? out_uop_out_1_is_fencei : out_uop_out_is_fencei; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_is_sfence <= main_io_deq_ready ? out_uop_out_1_is_sfence : out_uop_out_is_sfence; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_is_amo <= main_io_deq_ready ? out_uop_out_1_is_amo : out_uop_out_is_amo; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_is_eret <= main_io_deq_ready ? 
out_uop_out_1_is_eret : out_uop_out_is_eret; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_is_sys_pc2epc <= main_io_deq_ready ? out_uop_out_1_is_sys_pc2epc : out_uop_out_is_sys_pc2epc; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_is_rocc <= main_io_deq_ready ? out_uop_out_1_is_rocc : out_uop_out_is_rocc; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_is_mov <= main_io_deq_ready ? out_uop_out_1_is_mov : out_uop_out_is_mov; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_ftq_idx <= main_io_deq_ready ? out_uop_out_1_ftq_idx : out_uop_out_ftq_idx; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_edge_inst <= main_io_deq_ready ? out_uop_out_1_edge_inst : out_uop_out_edge_inst; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_pc_lob <= main_io_deq_ready ? out_uop_out_1_pc_lob : out_uop_out_pc_lob; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_taken <= main_io_deq_ready ? out_uop_out_1_taken : out_uop_out_taken; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_imm_rename <= main_io_deq_ready ? out_uop_out_1_imm_rename : out_uop_out_imm_rename; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_imm_sel <= main_io_deq_ready ? out_uop_out_1_imm_sel : out_uop_out_imm_sel; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_pimm <= main_io_deq_ready ? out_uop_out_1_pimm : out_uop_out_pimm; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_imm_packed <= main_io_deq_ready ? out_uop_out_1_imm_packed : out_uop_out_imm_packed; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_op1_sel <= main_io_deq_ready ? out_uop_out_1_op1_sel : out_uop_out_op1_sel; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_op2_sel <= main_io_deq_ready ? out_uop_out_1_op2_sel : out_uop_out_op2_sel; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_ldst <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_ldst : out_uop_out_fp_ctrl_ldst; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_wen <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_wen : out_uop_out_fp_ctrl_wen; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_ren1 <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_ren1 : out_uop_out_fp_ctrl_ren1; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_ren2 <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_ren2 : out_uop_out_fp_ctrl_ren2; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_ren3 <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_ren3 : out_uop_out_fp_ctrl_ren3; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_swap12 <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_swap12 : out_uop_out_fp_ctrl_swap12; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_swap23 <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_swap23 : out_uop_out_fp_ctrl_swap23; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_typeTagIn <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_typeTagIn : out_uop_out_fp_ctrl_typeTagIn; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_typeTagOut <= main_io_deq_ready ? 
out_uop_out_1_fp_ctrl_typeTagOut : out_uop_out_fp_ctrl_typeTagOut; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_fromint <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_fromint : out_uop_out_fp_ctrl_fromint; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_toint <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_toint : out_uop_out_fp_ctrl_toint; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_fastpipe <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_fastpipe : out_uop_out_fp_ctrl_fastpipe; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_fma <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_fma : out_uop_out_fp_ctrl_fma; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_div <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_div : out_uop_out_fp_ctrl_div; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_sqrt <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_sqrt : out_uop_out_fp_ctrl_sqrt; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_wflags <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_wflags : out_uop_out_fp_ctrl_wflags; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_ctrl_vec <= main_io_deq_ready ? out_uop_out_1_fp_ctrl_vec : out_uop_out_fp_ctrl_vec; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_rob_idx <= main_io_deq_ready ? out_uop_out_1_rob_idx : out_uop_out_rob_idx; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_ldq_idx <= main_io_deq_ready ? out_uop_out_1_ldq_idx : out_uop_out_ldq_idx; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_stq_idx <= main_io_deq_ready ? out_uop_out_1_stq_idx : out_uop_out_stq_idx; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_rxq_idx <= main_io_deq_ready ? out_uop_out_1_rxq_idx : out_uop_out_rxq_idx; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_pdst <= main_io_deq_ready ? out_uop_out_1_pdst : out_uop_out_pdst; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_prs1 <= main_io_deq_ready ? out_uop_out_1_prs1 : out_uop_out_prs1; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_prs2 <= main_io_deq_ready ? out_uop_out_1_prs2 : out_uop_out_prs2; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_prs3 <= main_io_deq_ready ? out_uop_out_1_prs3 : out_uop_out_prs3; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_ppred <= main_io_deq_ready ? out_uop_out_1_ppred : out_uop_out_ppred; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_prs1_busy <= main_io_deq_ready ? out_uop_out_1_prs1_busy : out_uop_out_prs1_busy; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_prs2_busy <= main_io_deq_ready ? out_uop_out_1_prs2_busy : out_uop_out_prs2_busy; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_prs3_busy <= main_io_deq_ready ? out_uop_out_1_prs3_busy : out_uop_out_prs3_busy; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_ppred_busy <= main_io_deq_ready ? out_uop_out_1_ppred_busy : out_uop_out_ppred_busy; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_stale_pdst <= main_io_deq_ready ? 
out_uop_out_1_stale_pdst : out_uop_out_stale_pdst; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_exception <= main_io_deq_ready ? out_uop_out_1_exception : out_uop_out_exception; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_exc_cause <= main_io_deq_ready ? out_uop_out_1_exc_cause : out_uop_out_exc_cause; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_mem_cmd <= main_io_deq_ready ? out_uop_out_1_mem_cmd : out_uop_out_mem_cmd; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_mem_size <= main_io_deq_ready ? out_uop_out_1_mem_size : out_uop_out_mem_size; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_mem_signed <= main_io_deq_ready ? out_uop_out_1_mem_signed : out_uop_out_mem_signed; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_uses_ldq <= main_io_deq_ready ? out_uop_out_1_uses_ldq : out_uop_out_uses_ldq; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_uses_stq <= main_io_deq_ready ? out_uop_out_1_uses_stq : out_uop_out_uses_stq; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_is_unique <= main_io_deq_ready ? out_uop_out_1_is_unique : out_uop_out_is_unique; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_flush_on_commit <= main_io_deq_ready ? out_uop_out_1_flush_on_commit : out_uop_out_flush_on_commit; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_csr_cmd <= main_io_deq_ready ? out_uop_out_1_csr_cmd : out_uop_out_csr_cmd; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_ldst_is_rs1 <= main_io_deq_ready ? out_uop_out_1_ldst_is_rs1 : out_uop_out_ldst_is_rs1; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_ldst <= main_io_deq_ready ? out_uop_out_1_ldst : out_uop_out_ldst; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_lrs1 <= main_io_deq_ready ? out_uop_out_1_lrs1 : out_uop_out_lrs1; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_lrs2 <= main_io_deq_ready ? out_uop_out_1_lrs2 : out_uop_out_lrs2; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_lrs3 <= main_io_deq_ready ? out_uop_out_1_lrs3 : out_uop_out_lrs3; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_dst_rtype <= main_io_deq_ready ? out_uop_out_1_dst_rtype : out_uop_out_dst_rtype; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_lrs1_rtype <= main_io_deq_ready ? out_uop_out_1_lrs1_rtype : out_uop_out_lrs1_rtype; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_lrs2_rtype <= main_io_deq_ready ? out_uop_out_1_lrs2_rtype : out_uop_out_lrs2_rtype; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_frs3_en <= main_io_deq_ready ? out_uop_out_1_frs3_en : out_uop_out_frs3_en; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fcn_dw <= main_io_deq_ready ? out_uop_out_1_fcn_dw : out_uop_out_fcn_dw; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fcn_op <= main_io_deq_ready ? out_uop_out_1_fcn_op : out_uop_out_fcn_op; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_val <= main_io_deq_ready ? out_uop_out_1_fp_val : out_uop_out_fp_val; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15] out_uop_fp_rm <= main_io_deq_ready ? 
out_uop_out_1_fp_rm : out_uop_out_fp_rm; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15]
    out_uop_fp_typ <= main_io_deq_ready ? out_uop_out_1_fp_typ : out_uop_out_fp_typ; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15]
    out_uop_xcpt_pf_if <= main_io_deq_ready ? out_uop_out_1_xcpt_pf_if : out_uop_out_xcpt_pf_if; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15]
    out_uop_xcpt_ae_if <= main_io_deq_ready ? out_uop_out_1_xcpt_ae_if : out_uop_out_xcpt_ae_if; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15]
    out_uop_xcpt_ma_if <= main_io_deq_ready ? out_uop_out_1_xcpt_ma_if : out_uop_out_xcpt_ma_if; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15]
    out_uop_bp_debug_if <= main_io_deq_ready ? out_uop_out_1_bp_debug_if : out_uop_out_bp_debug_if; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15]
    out_uop_bp_xcpt_if <= main_io_deq_ready ? out_uop_out_1_bp_xcpt_if : out_uop_out_bp_xcpt_if; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15]
    out_uop_debug_fsrc <= main_io_deq_ready ? out_uop_out_1_debug_fsrc : out_uop_out_debug_fsrc; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15]
    out_uop_debug_tsrc <= main_io_deq_ready ? out_uop_out_1_debug_tsrc : out_uop_out_debug_tsrc; // @[util.scala:104:23, :479:22, :491:13, :495:{23,38}, :498:15]
    if (reset) // @[util.scala:458:7]
      out_valid <= 1'h0; // @[util.scala:478:28]
    else // @[util.scala:458:7]
      out_valid <= main_io_deq_ready ? _out_valid_T_15 : _out_valid_T_7; // @[util.scala:478:28, :492:{15,80}, :495:{23,38}, :496:{17,103}]
  end // @[util.scala:458:7]
  // Inner queue providing the main storage; the out_* registers above form a one-entry output stage.
  BranchKillableQueue_2 main ( // @[util.scala:476:22]
    .clock (clock),
    .reset (reset),
    .io_enq_ready (io_enq_ready_0),
    .io_enq_valid (io_enq_valid_0), // @[util.scala:458:7]
    .io_enq_bits_uop_inst (io_enq_bits_uop_inst_0), // @[util.scala:458:7]
    .io_enq_bits_uop_debug_inst (io_enq_bits_uop_debug_inst_0), // @[util.scala:458:7]
    .io_enq_bits_uop_is_rvc (io_enq_bits_uop_is_rvc_0), // @[util.scala:458:7]
    .io_enq_bits_uop_debug_pc (io_enq_bits_uop_debug_pc_0), // @[util.scala:458:7]
    .io_enq_bits_uop_iq_type_0 (io_enq_bits_uop_iq_type_0_0), // @[util.scala:458:7]
    .io_enq_bits_uop_iq_type_1 (io_enq_bits_uop_iq_type_1_0), // @[util.scala:458:7]
    .io_enq_bits_uop_iq_type_2 (io_enq_bits_uop_iq_type_2_0), // @[util.scala:458:7]
    .io_enq_bits_uop_iq_type_3 (io_enq_bits_uop_iq_type_3_0), // @[util.scala:458:7]
    .io_enq_bits_uop_fu_code_0 (io_enq_bits_uop_fu_code_0_0), // @[util.scala:458:7]
    .io_enq_bits_uop_fu_code_1 (io_enq_bits_uop_fu_code_1_0), // @[util.scala:458:7]
    .io_enq_bits_uop_fu_code_2 (io_enq_bits_uop_fu_code_2_0), // @[util.scala:458:7]
    .io_enq_bits_uop_fu_code_3 (io_enq_bits_uop_fu_code_3_0), // @[util.scala:458:7]
    .io_enq_bits_uop_fu_code_4 (io_enq_bits_uop_fu_code_4_0), // @[util.scala:458:7]
    .io_enq_bits_uop_fu_code_5 (io_enq_bits_uop_fu_code_5_0), // @[util.scala:458:7]
    .io_enq_bits_uop_fu_code_6 (io_enq_bits_uop_fu_code_6_0), // @[util.scala:458:7]
    .io_enq_bits_uop_fu_code_7 (io_enq_bits_uop_fu_code_7_0), // @[util.scala:458:7]
    .io_enq_bits_uop_fu_code_8 (io_enq_bits_uop_fu_code_8_0), // @[util.scala:458:7]
    .io_enq_bits_uop_fu_code_9 (io_enq_bits_uop_fu_code_9_0), // @[util.scala:458:7]
    .io_enq_bits_uop_iw_issued (io_enq_bits_uop_iw_issued_0), // @[util.scala:458:7]
    .io_enq_bits_uop_iw_issued_partial_agen (io_enq_bits_uop_iw_issued_partial_agen_0), // @[util.scala:458:7]
    .io_enq_bits_uop_iw_issued_partial_dgen (io_enq_bits_uop_iw_issued_partial_dgen_0), // @[util.scala:458:7]
.io_enq_bits_uop_iw_p1_speculative_child (io_enq_bits_uop_iw_p1_speculative_child_0), // @[util.scala:458:7] .io_enq_bits_uop_iw_p2_speculative_child (io_enq_bits_uop_iw_p2_speculative_child_0), // @[util.scala:458:7] .io_enq_bits_uop_iw_p1_bypass_hint (io_enq_bits_uop_iw_p1_bypass_hint_0), // @[util.scala:458:7] .io_enq_bits_uop_iw_p2_bypass_hint (io_enq_bits_uop_iw_p2_bypass_hint_0), // @[util.scala:458:7] .io_enq_bits_uop_iw_p3_bypass_hint (io_enq_bits_uop_iw_p3_bypass_hint_0), // @[util.scala:458:7] .io_enq_bits_uop_dis_col_sel (io_enq_bits_uop_dis_col_sel_0), // @[util.scala:458:7] .io_enq_bits_uop_br_mask (io_enq_bits_uop_br_mask_0), // @[util.scala:458:7] .io_enq_bits_uop_br_tag (io_enq_bits_uop_br_tag_0), // @[util.scala:458:7] .io_enq_bits_uop_br_type (io_enq_bits_uop_br_type_0), // @[util.scala:458:7] .io_enq_bits_uop_is_sfb (io_enq_bits_uop_is_sfb_0), // @[util.scala:458:7] .io_enq_bits_uop_is_fence (io_enq_bits_uop_is_fence_0), // @[util.scala:458:7] .io_enq_bits_uop_is_fencei (io_enq_bits_uop_is_fencei_0), // @[util.scala:458:7] .io_enq_bits_uop_is_sfence (io_enq_bits_uop_is_sfence_0), // @[util.scala:458:7] .io_enq_bits_uop_is_amo (io_enq_bits_uop_is_amo_0), // @[util.scala:458:7] .io_enq_bits_uop_is_eret (io_enq_bits_uop_is_eret_0), // @[util.scala:458:7] .io_enq_bits_uop_is_sys_pc2epc (io_enq_bits_uop_is_sys_pc2epc_0), // @[util.scala:458:7] .io_enq_bits_uop_is_rocc (io_enq_bits_uop_is_rocc_0), // @[util.scala:458:7] .io_enq_bits_uop_is_mov (io_enq_bits_uop_is_mov_0), // @[util.scala:458:7] .io_enq_bits_uop_ftq_idx (io_enq_bits_uop_ftq_idx_0), // @[util.scala:458:7] .io_enq_bits_uop_edge_inst (io_enq_bits_uop_edge_inst_0), // @[util.scala:458:7] .io_enq_bits_uop_pc_lob (io_enq_bits_uop_pc_lob_0), // @[util.scala:458:7] .io_enq_bits_uop_taken (io_enq_bits_uop_taken_0), // @[util.scala:458:7] .io_enq_bits_uop_imm_rename (io_enq_bits_uop_imm_rename_0), // @[util.scala:458:7] .io_enq_bits_uop_imm_sel (io_enq_bits_uop_imm_sel_0), // @[util.scala:458:7] .io_enq_bits_uop_pimm (io_enq_bits_uop_pimm_0), // @[util.scala:458:7] .io_enq_bits_uop_imm_packed (io_enq_bits_uop_imm_packed_0), // @[util.scala:458:7] .io_enq_bits_uop_op1_sel (io_enq_bits_uop_op1_sel_0), // @[util.scala:458:7] .io_enq_bits_uop_op2_sel (io_enq_bits_uop_op2_sel_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_ldst (io_enq_bits_uop_fp_ctrl_ldst_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_wen (io_enq_bits_uop_fp_ctrl_wen_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_ren1 (io_enq_bits_uop_fp_ctrl_ren1_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_ren2 (io_enq_bits_uop_fp_ctrl_ren2_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_ren3 (io_enq_bits_uop_fp_ctrl_ren3_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_swap12 (io_enq_bits_uop_fp_ctrl_swap12_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_swap23 (io_enq_bits_uop_fp_ctrl_swap23_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_typeTagIn (io_enq_bits_uop_fp_ctrl_typeTagIn_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_typeTagOut (io_enq_bits_uop_fp_ctrl_typeTagOut_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_fromint (io_enq_bits_uop_fp_ctrl_fromint_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_toint (io_enq_bits_uop_fp_ctrl_toint_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_fastpipe (io_enq_bits_uop_fp_ctrl_fastpipe_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_fma (io_enq_bits_uop_fp_ctrl_fma_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_div (io_enq_bits_uop_fp_ctrl_div_0), // 
@[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_sqrt (io_enq_bits_uop_fp_ctrl_sqrt_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_wflags (io_enq_bits_uop_fp_ctrl_wflags_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_ctrl_vec (io_enq_bits_uop_fp_ctrl_vec_0), // @[util.scala:458:7] .io_enq_bits_uop_rob_idx (io_enq_bits_uop_rob_idx_0), // @[util.scala:458:7] .io_enq_bits_uop_ldq_idx (io_enq_bits_uop_ldq_idx_0), // @[util.scala:458:7] .io_enq_bits_uop_stq_idx (io_enq_bits_uop_stq_idx_0), // @[util.scala:458:7] .io_enq_bits_uop_rxq_idx (io_enq_bits_uop_rxq_idx_0), // @[util.scala:458:7] .io_enq_bits_uop_pdst (io_enq_bits_uop_pdst_0), // @[util.scala:458:7] .io_enq_bits_uop_prs1 (io_enq_bits_uop_prs1_0), // @[util.scala:458:7] .io_enq_bits_uop_prs2 (io_enq_bits_uop_prs2_0), // @[util.scala:458:7] .io_enq_bits_uop_prs3 (io_enq_bits_uop_prs3_0), // @[util.scala:458:7] .io_enq_bits_uop_ppred (io_enq_bits_uop_ppred_0), // @[util.scala:458:7] .io_enq_bits_uop_prs1_busy (io_enq_bits_uop_prs1_busy_0), // @[util.scala:458:7] .io_enq_bits_uop_prs2_busy (io_enq_bits_uop_prs2_busy_0), // @[util.scala:458:7] .io_enq_bits_uop_prs3_busy (io_enq_bits_uop_prs3_busy_0), // @[util.scala:458:7] .io_enq_bits_uop_ppred_busy (io_enq_bits_uop_ppred_busy_0), // @[util.scala:458:7] .io_enq_bits_uop_stale_pdst (io_enq_bits_uop_stale_pdst_0), // @[util.scala:458:7] .io_enq_bits_uop_exception (io_enq_bits_uop_exception_0), // @[util.scala:458:7] .io_enq_bits_uop_exc_cause (io_enq_bits_uop_exc_cause_0), // @[util.scala:458:7] .io_enq_bits_uop_mem_cmd (io_enq_bits_uop_mem_cmd_0), // @[util.scala:458:7] .io_enq_bits_uop_mem_size (io_enq_bits_uop_mem_size_0), // @[util.scala:458:7] .io_enq_bits_uop_mem_signed (io_enq_bits_uop_mem_signed_0), // @[util.scala:458:7] .io_enq_bits_uop_uses_ldq (io_enq_bits_uop_uses_ldq_0), // @[util.scala:458:7] .io_enq_bits_uop_uses_stq (io_enq_bits_uop_uses_stq_0), // @[util.scala:458:7] .io_enq_bits_uop_is_unique (io_enq_bits_uop_is_unique_0), // @[util.scala:458:7] .io_enq_bits_uop_flush_on_commit (io_enq_bits_uop_flush_on_commit_0), // @[util.scala:458:7] .io_enq_bits_uop_csr_cmd (io_enq_bits_uop_csr_cmd_0), // @[util.scala:458:7] .io_enq_bits_uop_ldst_is_rs1 (io_enq_bits_uop_ldst_is_rs1_0), // @[util.scala:458:7] .io_enq_bits_uop_ldst (io_enq_bits_uop_ldst_0), // @[util.scala:458:7] .io_enq_bits_uop_lrs1 (io_enq_bits_uop_lrs1_0), // @[util.scala:458:7] .io_enq_bits_uop_lrs2 (io_enq_bits_uop_lrs2_0), // @[util.scala:458:7] .io_enq_bits_uop_lrs3 (io_enq_bits_uop_lrs3_0), // @[util.scala:458:7] .io_enq_bits_uop_dst_rtype (io_enq_bits_uop_dst_rtype_0), // @[util.scala:458:7] .io_enq_bits_uop_lrs1_rtype (io_enq_bits_uop_lrs1_rtype_0), // @[util.scala:458:7] .io_enq_bits_uop_lrs2_rtype (io_enq_bits_uop_lrs2_rtype_0), // @[util.scala:458:7] .io_enq_bits_uop_frs3_en (io_enq_bits_uop_frs3_en_0), // @[util.scala:458:7] .io_enq_bits_uop_fcn_dw (io_enq_bits_uop_fcn_dw_0), // @[util.scala:458:7] .io_enq_bits_uop_fcn_op (io_enq_bits_uop_fcn_op_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_val (io_enq_bits_uop_fp_val_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_rm (io_enq_bits_uop_fp_rm_0), // @[util.scala:458:7] .io_enq_bits_uop_fp_typ (io_enq_bits_uop_fp_typ_0), // @[util.scala:458:7] .io_enq_bits_uop_xcpt_pf_if (io_enq_bits_uop_xcpt_pf_if_0), // @[util.scala:458:7] .io_enq_bits_uop_xcpt_ae_if (io_enq_bits_uop_xcpt_ae_if_0), // @[util.scala:458:7] .io_enq_bits_uop_xcpt_ma_if (io_enq_bits_uop_xcpt_ma_if_0), // @[util.scala:458:7] .io_enq_bits_uop_bp_debug_if (io_enq_bits_uop_bp_debug_if_0), // 
@[util.scala:458:7] .io_enq_bits_uop_bp_xcpt_if (io_enq_bits_uop_bp_xcpt_if_0), // @[util.scala:458:7] .io_enq_bits_uop_debug_fsrc (io_enq_bits_uop_debug_fsrc_0), // @[util.scala:458:7] .io_enq_bits_uop_debug_tsrc (io_enq_bits_uop_debug_tsrc_0), // @[util.scala:458:7] .io_enq_bits_addr (io_enq_bits_addr_0), // @[util.scala:458:7] .io_enq_bits_data (io_enq_bits_data_0), // @[util.scala:458:7] .io_enq_bits_is_hella (io_enq_bits_is_hella_0), // @[util.scala:458:7] .io_enq_bits_tag_match (io_enq_bits_tag_match_0), // @[util.scala:458:7] .io_enq_bits_old_meta_coh_state (io_enq_bits_old_meta_coh_state_0), // @[util.scala:458:7] .io_enq_bits_old_meta_tag (io_enq_bits_old_meta_tag_0), // @[util.scala:458:7] .io_enq_bits_way_en (io_enq_bits_way_en_0), // @[util.scala:458:7] .io_enq_bits_sdq_id (io_enq_bits_sdq_id_0), // @[util.scala:458:7] .io_deq_ready (main_io_deq_ready), // @[util.scala:495:23] .io_deq_valid (_out_valid_T_12), .io_deq_bits_uop_inst (_main_io_deq_bits_uop_inst), .io_deq_bits_uop_debug_inst (_main_io_deq_bits_uop_debug_inst), .io_deq_bits_uop_is_rvc (_main_io_deq_bits_uop_is_rvc), .io_deq_bits_uop_debug_pc (_main_io_deq_bits_uop_debug_pc), .io_deq_bits_uop_iq_type_0 (_main_io_deq_bits_uop_iq_type_0), .io_deq_bits_uop_iq_type_1 (_main_io_deq_bits_uop_iq_type_1), .io_deq_bits_uop_iq_type_2 (_main_io_deq_bits_uop_iq_type_2), .io_deq_bits_uop_iq_type_3 (_main_io_deq_bits_uop_iq_type_3), .io_deq_bits_uop_fu_code_0 (_main_io_deq_bits_uop_fu_code_0), .io_deq_bits_uop_fu_code_1 (_main_io_deq_bits_uop_fu_code_1), .io_deq_bits_uop_fu_code_2 (_main_io_deq_bits_uop_fu_code_2), .io_deq_bits_uop_fu_code_3 (_main_io_deq_bits_uop_fu_code_3), .io_deq_bits_uop_fu_code_4 (_main_io_deq_bits_uop_fu_code_4), .io_deq_bits_uop_fu_code_5 (_main_io_deq_bits_uop_fu_code_5), .io_deq_bits_uop_fu_code_6 (_main_io_deq_bits_uop_fu_code_6), .io_deq_bits_uop_fu_code_7 (_main_io_deq_bits_uop_fu_code_7), .io_deq_bits_uop_fu_code_8 (_main_io_deq_bits_uop_fu_code_8), .io_deq_bits_uop_fu_code_9 (_main_io_deq_bits_uop_fu_code_9), .io_deq_bits_uop_iw_issued (_main_io_deq_bits_uop_iw_issued), .io_deq_bits_uop_iw_issued_partial_agen (_main_io_deq_bits_uop_iw_issued_partial_agen), .io_deq_bits_uop_iw_issued_partial_dgen (_main_io_deq_bits_uop_iw_issued_partial_dgen), .io_deq_bits_uop_iw_p1_speculative_child (_main_io_deq_bits_uop_iw_p1_speculative_child), .io_deq_bits_uop_iw_p2_speculative_child (_main_io_deq_bits_uop_iw_p2_speculative_child), .io_deq_bits_uop_iw_p1_bypass_hint (_main_io_deq_bits_uop_iw_p1_bypass_hint), .io_deq_bits_uop_iw_p2_bypass_hint (_main_io_deq_bits_uop_iw_p2_bypass_hint), .io_deq_bits_uop_iw_p3_bypass_hint (_main_io_deq_bits_uop_iw_p3_bypass_hint), .io_deq_bits_uop_dis_col_sel (_main_io_deq_bits_uop_dis_col_sel), .io_deq_bits_uop_br_mask (_main_io_deq_bits_uop_br_mask), .io_deq_bits_uop_br_tag (_main_io_deq_bits_uop_br_tag), .io_deq_bits_uop_br_type (_main_io_deq_bits_uop_br_type), .io_deq_bits_uop_is_sfb (_main_io_deq_bits_uop_is_sfb), .io_deq_bits_uop_is_fence (_main_io_deq_bits_uop_is_fence), .io_deq_bits_uop_is_fencei (_main_io_deq_bits_uop_is_fencei), .io_deq_bits_uop_is_sfence (_main_io_deq_bits_uop_is_sfence), .io_deq_bits_uop_is_amo (_main_io_deq_bits_uop_is_amo), .io_deq_bits_uop_is_eret (_main_io_deq_bits_uop_is_eret), .io_deq_bits_uop_is_sys_pc2epc (_main_io_deq_bits_uop_is_sys_pc2epc), .io_deq_bits_uop_is_rocc (_main_io_deq_bits_uop_is_rocc), .io_deq_bits_uop_is_mov (_main_io_deq_bits_uop_is_mov), .io_deq_bits_uop_ftq_idx (_main_io_deq_bits_uop_ftq_idx), .io_deq_bits_uop_edge_inst 
(_main_io_deq_bits_uop_edge_inst), .io_deq_bits_uop_pc_lob (_main_io_deq_bits_uop_pc_lob), .io_deq_bits_uop_taken (_main_io_deq_bits_uop_taken), .io_deq_bits_uop_imm_rename (_main_io_deq_bits_uop_imm_rename), .io_deq_bits_uop_imm_sel (_main_io_deq_bits_uop_imm_sel), .io_deq_bits_uop_pimm (_main_io_deq_bits_uop_pimm), .io_deq_bits_uop_imm_packed (_main_io_deq_bits_uop_imm_packed), .io_deq_bits_uop_op1_sel (_main_io_deq_bits_uop_op1_sel), .io_deq_bits_uop_op2_sel (_main_io_deq_bits_uop_op2_sel), .io_deq_bits_uop_fp_ctrl_ldst (_main_io_deq_bits_uop_fp_ctrl_ldst), .io_deq_bits_uop_fp_ctrl_wen (_main_io_deq_bits_uop_fp_ctrl_wen), .io_deq_bits_uop_fp_ctrl_ren1 (_main_io_deq_bits_uop_fp_ctrl_ren1), .io_deq_bits_uop_fp_ctrl_ren2 (_main_io_deq_bits_uop_fp_ctrl_ren2), .io_deq_bits_uop_fp_ctrl_ren3 (_main_io_deq_bits_uop_fp_ctrl_ren3), .io_deq_bits_uop_fp_ctrl_swap12 (_main_io_deq_bits_uop_fp_ctrl_swap12), .io_deq_bits_uop_fp_ctrl_swap23 (_main_io_deq_bits_uop_fp_ctrl_swap23), .io_deq_bits_uop_fp_ctrl_typeTagIn (_main_io_deq_bits_uop_fp_ctrl_typeTagIn), .io_deq_bits_uop_fp_ctrl_typeTagOut (_main_io_deq_bits_uop_fp_ctrl_typeTagOut), .io_deq_bits_uop_fp_ctrl_fromint (_main_io_deq_bits_uop_fp_ctrl_fromint), .io_deq_bits_uop_fp_ctrl_toint (_main_io_deq_bits_uop_fp_ctrl_toint), .io_deq_bits_uop_fp_ctrl_fastpipe (_main_io_deq_bits_uop_fp_ctrl_fastpipe), .io_deq_bits_uop_fp_ctrl_fma (_main_io_deq_bits_uop_fp_ctrl_fma), .io_deq_bits_uop_fp_ctrl_div (_main_io_deq_bits_uop_fp_ctrl_div), .io_deq_bits_uop_fp_ctrl_sqrt (_main_io_deq_bits_uop_fp_ctrl_sqrt), .io_deq_bits_uop_fp_ctrl_wflags (_main_io_deq_bits_uop_fp_ctrl_wflags), .io_deq_bits_uop_fp_ctrl_vec (_main_io_deq_bits_uop_fp_ctrl_vec), .io_deq_bits_uop_rob_idx (_main_io_deq_bits_uop_rob_idx), .io_deq_bits_uop_ldq_idx (_main_io_deq_bits_uop_ldq_idx), .io_deq_bits_uop_stq_idx (_main_io_deq_bits_uop_stq_idx), .io_deq_bits_uop_rxq_idx (_main_io_deq_bits_uop_rxq_idx), .io_deq_bits_uop_pdst (_main_io_deq_bits_uop_pdst), .io_deq_bits_uop_prs1 (_main_io_deq_bits_uop_prs1), .io_deq_bits_uop_prs2 (_main_io_deq_bits_uop_prs2), .io_deq_bits_uop_prs3 (_main_io_deq_bits_uop_prs3), .io_deq_bits_uop_ppred (_main_io_deq_bits_uop_ppred), .io_deq_bits_uop_prs1_busy (_main_io_deq_bits_uop_prs1_busy), .io_deq_bits_uop_prs2_busy (_main_io_deq_bits_uop_prs2_busy), .io_deq_bits_uop_prs3_busy (_main_io_deq_bits_uop_prs3_busy), .io_deq_bits_uop_ppred_busy (_main_io_deq_bits_uop_ppred_busy), .io_deq_bits_uop_stale_pdst (_main_io_deq_bits_uop_stale_pdst), .io_deq_bits_uop_exception (_main_io_deq_bits_uop_exception), .io_deq_bits_uop_exc_cause (_main_io_deq_bits_uop_exc_cause), .io_deq_bits_uop_mem_cmd (_main_io_deq_bits_uop_mem_cmd), .io_deq_bits_uop_mem_size (_main_io_deq_bits_uop_mem_size), .io_deq_bits_uop_mem_signed (_main_io_deq_bits_uop_mem_signed), .io_deq_bits_uop_uses_ldq (_main_io_deq_bits_uop_uses_ldq), .io_deq_bits_uop_uses_stq (_main_io_deq_bits_uop_uses_stq), .io_deq_bits_uop_is_unique (_main_io_deq_bits_uop_is_unique), .io_deq_bits_uop_flush_on_commit (_main_io_deq_bits_uop_flush_on_commit), .io_deq_bits_uop_csr_cmd (_main_io_deq_bits_uop_csr_cmd), .io_deq_bits_uop_ldst_is_rs1 (_main_io_deq_bits_uop_ldst_is_rs1), .io_deq_bits_uop_ldst (_main_io_deq_bits_uop_ldst), .io_deq_bits_uop_lrs1 (_main_io_deq_bits_uop_lrs1), .io_deq_bits_uop_lrs2 (_main_io_deq_bits_uop_lrs2), .io_deq_bits_uop_lrs3 (_main_io_deq_bits_uop_lrs3), .io_deq_bits_uop_dst_rtype (_main_io_deq_bits_uop_dst_rtype), .io_deq_bits_uop_lrs1_rtype (_main_io_deq_bits_uop_lrs1_rtype), .io_deq_bits_uop_lrs2_rtype 
(_main_io_deq_bits_uop_lrs2_rtype), .io_deq_bits_uop_frs3_en (_main_io_deq_bits_uop_frs3_en), .io_deq_bits_uop_fcn_dw (_main_io_deq_bits_uop_fcn_dw), .io_deq_bits_uop_fcn_op (_main_io_deq_bits_uop_fcn_op), .io_deq_bits_uop_fp_val (_main_io_deq_bits_uop_fp_val), .io_deq_bits_uop_fp_rm (_main_io_deq_bits_uop_fp_rm), .io_deq_bits_uop_fp_typ (_main_io_deq_bits_uop_fp_typ), .io_deq_bits_uop_xcpt_pf_if (_main_io_deq_bits_uop_xcpt_pf_if), .io_deq_bits_uop_xcpt_ae_if (_main_io_deq_bits_uop_xcpt_ae_if), .io_deq_bits_uop_xcpt_ma_if (_main_io_deq_bits_uop_xcpt_ma_if), .io_deq_bits_uop_bp_debug_if (_main_io_deq_bits_uop_bp_debug_if), .io_deq_bits_uop_bp_xcpt_if (_main_io_deq_bits_uop_bp_xcpt_if), .io_deq_bits_uop_debug_fsrc (_main_io_deq_bits_uop_debug_fsrc), .io_deq_bits_uop_debug_tsrc (_main_io_deq_bits_uop_debug_tsrc), .io_deq_bits_addr (_main_io_deq_bits_addr), .io_deq_bits_data (_main_io_deq_bits_data), .io_deq_bits_is_hella (_main_io_deq_bits_is_hella), .io_deq_bits_tag_match (_main_io_deq_bits_tag_match), .io_deq_bits_old_meta_coh_state (_main_io_deq_bits_old_meta_coh_state), .io_deq_bits_old_meta_tag (_main_io_deq_bits_old_meta_tag), .io_deq_bits_way_en (_main_io_deq_bits_way_en), .io_deq_bits_sdq_id (_main_io_deq_bits_sdq_id), .io_empty (_main_io_empty), .io_count (_main_io_count) ); // @[util.scala:476:22] assign out_uop_out_1_inst = _main_io_deq_bits_uop_inst; // @[util.scala:104:23, :476:22] assign out_uop_out_1_debug_inst = _main_io_deq_bits_uop_debug_inst; // @[util.scala:104:23, :476:22] assign out_uop_out_1_is_rvc = _main_io_deq_bits_uop_is_rvc; // @[util.scala:104:23, :476:22] assign out_uop_out_1_debug_pc = _main_io_deq_bits_uop_debug_pc; // @[util.scala:104:23, :476:22] assign out_uop_out_1_iq_type_0 = _main_io_deq_bits_uop_iq_type_0; // @[util.scala:104:23, :476:22] assign out_uop_out_1_iq_type_1 = _main_io_deq_bits_uop_iq_type_1; // @[util.scala:104:23, :476:22] assign out_uop_out_1_iq_type_2 = _main_io_deq_bits_uop_iq_type_2; // @[util.scala:104:23, :476:22] assign out_uop_out_1_iq_type_3 = _main_io_deq_bits_uop_iq_type_3; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fu_code_0 = _main_io_deq_bits_uop_fu_code_0; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fu_code_1 = _main_io_deq_bits_uop_fu_code_1; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fu_code_2 = _main_io_deq_bits_uop_fu_code_2; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fu_code_3 = _main_io_deq_bits_uop_fu_code_3; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fu_code_4 = _main_io_deq_bits_uop_fu_code_4; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fu_code_5 = _main_io_deq_bits_uop_fu_code_5; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fu_code_6 = _main_io_deq_bits_uop_fu_code_6; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fu_code_7 = _main_io_deq_bits_uop_fu_code_7; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fu_code_8 = _main_io_deq_bits_uop_fu_code_8; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fu_code_9 = _main_io_deq_bits_uop_fu_code_9; // @[util.scala:104:23, :476:22] assign out_uop_out_1_iw_issued = _main_io_deq_bits_uop_iw_issued; // @[util.scala:104:23, :476:22] assign out_uop_out_1_iw_issued_partial_agen = _main_io_deq_bits_uop_iw_issued_partial_agen; // @[util.scala:104:23, :476:22] assign out_uop_out_1_iw_issued_partial_dgen = _main_io_deq_bits_uop_iw_issued_partial_dgen; // @[util.scala:104:23, :476:22] assign out_uop_out_1_iw_p1_speculative_child = _main_io_deq_bits_uop_iw_p1_speculative_child; // 
@[util.scala:104:23, :476:22] assign out_uop_out_1_iw_p2_speculative_child = _main_io_deq_bits_uop_iw_p2_speculative_child; // @[util.scala:104:23, :476:22] assign out_uop_out_1_iw_p1_bypass_hint = _main_io_deq_bits_uop_iw_p1_bypass_hint; // @[util.scala:104:23, :476:22] assign out_uop_out_1_iw_p2_bypass_hint = _main_io_deq_bits_uop_iw_p2_bypass_hint; // @[util.scala:104:23, :476:22] assign out_uop_out_1_iw_p3_bypass_hint = _main_io_deq_bits_uop_iw_p3_bypass_hint; // @[util.scala:104:23, :476:22] assign out_uop_out_1_dis_col_sel = _main_io_deq_bits_uop_dis_col_sel; // @[util.scala:104:23, :476:22] assign out_uop_out_1_br_tag = _main_io_deq_bits_uop_br_tag; // @[util.scala:104:23, :476:22] assign out_uop_out_1_br_type = _main_io_deq_bits_uop_br_type; // @[util.scala:104:23, :476:22] assign out_uop_out_1_is_sfb = _main_io_deq_bits_uop_is_sfb; // @[util.scala:104:23, :476:22] assign out_uop_out_1_is_fence = _main_io_deq_bits_uop_is_fence; // @[util.scala:104:23, :476:22] assign out_uop_out_1_is_fencei = _main_io_deq_bits_uop_is_fencei; // @[util.scala:104:23, :476:22] assign out_uop_out_1_is_sfence = _main_io_deq_bits_uop_is_sfence; // @[util.scala:104:23, :476:22] assign out_uop_out_1_is_amo = _main_io_deq_bits_uop_is_amo; // @[util.scala:104:23, :476:22] assign out_uop_out_1_is_eret = _main_io_deq_bits_uop_is_eret; // @[util.scala:104:23, :476:22] assign out_uop_out_1_is_sys_pc2epc = _main_io_deq_bits_uop_is_sys_pc2epc; // @[util.scala:104:23, :476:22] assign out_uop_out_1_is_rocc = _main_io_deq_bits_uop_is_rocc; // @[util.scala:104:23, :476:22] assign out_uop_out_1_is_mov = _main_io_deq_bits_uop_is_mov; // @[util.scala:104:23, :476:22] assign out_uop_out_1_ftq_idx = _main_io_deq_bits_uop_ftq_idx; // @[util.scala:104:23, :476:22] assign out_uop_out_1_edge_inst = _main_io_deq_bits_uop_edge_inst; // @[util.scala:104:23, :476:22] assign out_uop_out_1_pc_lob = _main_io_deq_bits_uop_pc_lob; // @[util.scala:104:23, :476:22] assign out_uop_out_1_taken = _main_io_deq_bits_uop_taken; // @[util.scala:104:23, :476:22] assign out_uop_out_1_imm_rename = _main_io_deq_bits_uop_imm_rename; // @[util.scala:104:23, :476:22] assign out_uop_out_1_imm_sel = _main_io_deq_bits_uop_imm_sel; // @[util.scala:104:23, :476:22] assign out_uop_out_1_pimm = _main_io_deq_bits_uop_pimm; // @[util.scala:104:23, :476:22] assign out_uop_out_1_imm_packed = _main_io_deq_bits_uop_imm_packed; // @[util.scala:104:23, :476:22] assign out_uop_out_1_op1_sel = _main_io_deq_bits_uop_op1_sel; // @[util.scala:104:23, :476:22] assign out_uop_out_1_op2_sel = _main_io_deq_bits_uop_op2_sel; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_ldst = _main_io_deq_bits_uop_fp_ctrl_ldst; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_wen = _main_io_deq_bits_uop_fp_ctrl_wen; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_ren1 = _main_io_deq_bits_uop_fp_ctrl_ren1; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_ren2 = _main_io_deq_bits_uop_fp_ctrl_ren2; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_ren3 = _main_io_deq_bits_uop_fp_ctrl_ren3; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_swap12 = _main_io_deq_bits_uop_fp_ctrl_swap12; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_swap23 = _main_io_deq_bits_uop_fp_ctrl_swap23; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_typeTagIn = _main_io_deq_bits_uop_fp_ctrl_typeTagIn; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_typeTagOut = 
_main_io_deq_bits_uop_fp_ctrl_typeTagOut; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_fromint = _main_io_deq_bits_uop_fp_ctrl_fromint; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_toint = _main_io_deq_bits_uop_fp_ctrl_toint; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_fastpipe = _main_io_deq_bits_uop_fp_ctrl_fastpipe; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_fma = _main_io_deq_bits_uop_fp_ctrl_fma; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_div = _main_io_deq_bits_uop_fp_ctrl_div; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_sqrt = _main_io_deq_bits_uop_fp_ctrl_sqrt; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_wflags = _main_io_deq_bits_uop_fp_ctrl_wflags; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_ctrl_vec = _main_io_deq_bits_uop_fp_ctrl_vec; // @[util.scala:104:23, :476:22] assign out_uop_out_1_rob_idx = _main_io_deq_bits_uop_rob_idx; // @[util.scala:104:23, :476:22] assign out_uop_out_1_ldq_idx = _main_io_deq_bits_uop_ldq_idx; // @[util.scala:104:23, :476:22] assign out_uop_out_1_stq_idx = _main_io_deq_bits_uop_stq_idx; // @[util.scala:104:23, :476:22] assign out_uop_out_1_rxq_idx = _main_io_deq_bits_uop_rxq_idx; // @[util.scala:104:23, :476:22] assign out_uop_out_1_pdst = _main_io_deq_bits_uop_pdst; // @[util.scala:104:23, :476:22] assign out_uop_out_1_prs1 = _main_io_deq_bits_uop_prs1; // @[util.scala:104:23, :476:22] assign out_uop_out_1_prs2 = _main_io_deq_bits_uop_prs2; // @[util.scala:104:23, :476:22] assign out_uop_out_1_prs3 = _main_io_deq_bits_uop_prs3; // @[util.scala:104:23, :476:22] assign out_uop_out_1_ppred = _main_io_deq_bits_uop_ppred; // @[util.scala:104:23, :476:22] assign out_uop_out_1_prs1_busy = _main_io_deq_bits_uop_prs1_busy; // @[util.scala:104:23, :476:22] assign out_uop_out_1_prs2_busy = _main_io_deq_bits_uop_prs2_busy; // @[util.scala:104:23, :476:22] assign out_uop_out_1_prs3_busy = _main_io_deq_bits_uop_prs3_busy; // @[util.scala:104:23, :476:22] assign out_uop_out_1_ppred_busy = _main_io_deq_bits_uop_ppred_busy; // @[util.scala:104:23, :476:22] assign out_uop_out_1_stale_pdst = _main_io_deq_bits_uop_stale_pdst; // @[util.scala:104:23, :476:22] assign out_uop_out_1_exception = _main_io_deq_bits_uop_exception; // @[util.scala:104:23, :476:22] assign out_uop_out_1_exc_cause = _main_io_deq_bits_uop_exc_cause; // @[util.scala:104:23, :476:22] assign out_uop_out_1_mem_cmd = _main_io_deq_bits_uop_mem_cmd; // @[util.scala:104:23, :476:22] assign out_uop_out_1_mem_size = _main_io_deq_bits_uop_mem_size; // @[util.scala:104:23, :476:22] assign out_uop_out_1_mem_signed = _main_io_deq_bits_uop_mem_signed; // @[util.scala:104:23, :476:22] assign out_uop_out_1_uses_ldq = _main_io_deq_bits_uop_uses_ldq; // @[util.scala:104:23, :476:22] assign out_uop_out_1_uses_stq = _main_io_deq_bits_uop_uses_stq; // @[util.scala:104:23, :476:22] assign out_uop_out_1_is_unique = _main_io_deq_bits_uop_is_unique; // @[util.scala:104:23, :476:22] assign out_uop_out_1_flush_on_commit = _main_io_deq_bits_uop_flush_on_commit; // @[util.scala:104:23, :476:22] assign out_uop_out_1_csr_cmd = _main_io_deq_bits_uop_csr_cmd; // @[util.scala:104:23, :476:22] assign out_uop_out_1_ldst_is_rs1 = _main_io_deq_bits_uop_ldst_is_rs1; // @[util.scala:104:23, :476:22] assign out_uop_out_1_ldst = _main_io_deq_bits_uop_ldst; // @[util.scala:104:23, :476:22] assign out_uop_out_1_lrs1 = _main_io_deq_bits_uop_lrs1; // @[util.scala:104:23, :476:22] assign out_uop_out_1_lrs2 = 
_main_io_deq_bits_uop_lrs2; // @[util.scala:104:23, :476:22] assign out_uop_out_1_lrs3 = _main_io_deq_bits_uop_lrs3; // @[util.scala:104:23, :476:22] assign out_uop_out_1_dst_rtype = _main_io_deq_bits_uop_dst_rtype; // @[util.scala:104:23, :476:22] assign out_uop_out_1_lrs1_rtype = _main_io_deq_bits_uop_lrs1_rtype; // @[util.scala:104:23, :476:22] assign out_uop_out_1_lrs2_rtype = _main_io_deq_bits_uop_lrs2_rtype; // @[util.scala:104:23, :476:22] assign out_uop_out_1_frs3_en = _main_io_deq_bits_uop_frs3_en; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fcn_dw = _main_io_deq_bits_uop_fcn_dw; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fcn_op = _main_io_deq_bits_uop_fcn_op; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_val = _main_io_deq_bits_uop_fp_val; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_rm = _main_io_deq_bits_uop_fp_rm; // @[util.scala:104:23, :476:22] assign out_uop_out_1_fp_typ = _main_io_deq_bits_uop_fp_typ; // @[util.scala:104:23, :476:22] assign out_uop_out_1_xcpt_pf_if = _main_io_deq_bits_uop_xcpt_pf_if; // @[util.scala:104:23, :476:22] assign out_uop_out_1_xcpt_ae_if = _main_io_deq_bits_uop_xcpt_ae_if; // @[util.scala:104:23, :476:22] assign out_uop_out_1_xcpt_ma_if = _main_io_deq_bits_uop_xcpt_ma_if; // @[util.scala:104:23, :476:22] assign out_uop_out_1_bp_debug_if = _main_io_deq_bits_uop_bp_debug_if; // @[util.scala:104:23, :476:22] assign out_uop_out_1_bp_xcpt_if = _main_io_deq_bits_uop_bp_xcpt_if; // @[util.scala:104:23, :476:22] assign out_uop_out_1_debug_fsrc = _main_io_deq_bits_uop_debug_fsrc; // @[util.scala:104:23, :476:22] assign out_uop_out_1_debug_tsrc = _main_io_deq_bits_uop_debug_tsrc; // @[util.scala:104:23, :476:22] assign _out_uop_out_br_mask_T_3 = _main_io_deq_bits_uop_br_mask; // @[util.scala:93:25, :476:22] assign io_enq_ready = io_enq_ready_0; // @[util.scala:458:7] assign io_deq_valid = io_deq_valid_0; // @[util.scala:458:7] assign io_deq_bits_uop_inst = io_deq_bits_uop_inst_0; // @[util.scala:458:7] assign io_deq_bits_uop_debug_inst = io_deq_bits_uop_debug_inst_0; // @[util.scala:458:7] assign io_deq_bits_uop_is_rvc = io_deq_bits_uop_is_rvc_0; // @[util.scala:458:7] assign io_deq_bits_uop_debug_pc = io_deq_bits_uop_debug_pc_0; // @[util.scala:458:7] assign io_deq_bits_uop_iq_type_0 = io_deq_bits_uop_iq_type_0_0; // @[util.scala:458:7] assign io_deq_bits_uop_iq_type_1 = io_deq_bits_uop_iq_type_1_0; // @[util.scala:458:7] assign io_deq_bits_uop_iq_type_2 = io_deq_bits_uop_iq_type_2_0; // @[util.scala:458:7] assign io_deq_bits_uop_iq_type_3 = io_deq_bits_uop_iq_type_3_0; // @[util.scala:458:7] assign io_deq_bits_uop_fu_code_0 = io_deq_bits_uop_fu_code_0_0; // @[util.scala:458:7] assign io_deq_bits_uop_fu_code_1 = io_deq_bits_uop_fu_code_1_0; // @[util.scala:458:7] assign io_deq_bits_uop_fu_code_2 = io_deq_bits_uop_fu_code_2_0; // @[util.scala:458:7] assign io_deq_bits_uop_fu_code_3 = io_deq_bits_uop_fu_code_3_0; // @[util.scala:458:7] assign io_deq_bits_uop_fu_code_4 = io_deq_bits_uop_fu_code_4_0; // @[util.scala:458:7] assign io_deq_bits_uop_fu_code_5 = io_deq_bits_uop_fu_code_5_0; // @[util.scala:458:7] assign io_deq_bits_uop_fu_code_6 = io_deq_bits_uop_fu_code_6_0; // @[util.scala:458:7] assign io_deq_bits_uop_fu_code_7 = io_deq_bits_uop_fu_code_7_0; // @[util.scala:458:7] assign io_deq_bits_uop_fu_code_8 = io_deq_bits_uop_fu_code_8_0; // @[util.scala:458:7] assign io_deq_bits_uop_fu_code_9 = io_deq_bits_uop_fu_code_9_0; // @[util.scala:458:7] assign io_deq_bits_uop_iw_issued = io_deq_bits_uop_iw_issued_0; // 
@[util.scala:458:7] assign io_deq_bits_uop_iw_issued_partial_agen = io_deq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7] assign io_deq_bits_uop_iw_issued_partial_dgen = io_deq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7] assign io_deq_bits_uop_iw_p1_speculative_child = io_deq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7] assign io_deq_bits_uop_iw_p2_speculative_child = io_deq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7] assign io_deq_bits_uop_iw_p1_bypass_hint = io_deq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7] assign io_deq_bits_uop_iw_p2_bypass_hint = io_deq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7] assign io_deq_bits_uop_iw_p3_bypass_hint = io_deq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7] assign io_deq_bits_uop_dis_col_sel = io_deq_bits_uop_dis_col_sel_0; // @[util.scala:458:7] assign io_deq_bits_uop_br_mask = io_deq_bits_uop_br_mask_0; // @[util.scala:458:7] assign io_deq_bits_uop_br_tag = io_deq_bits_uop_br_tag_0; // @[util.scala:458:7] assign io_deq_bits_uop_br_type = io_deq_bits_uop_br_type_0; // @[util.scala:458:7] assign io_deq_bits_uop_is_sfb = io_deq_bits_uop_is_sfb_0; // @[util.scala:458:7] assign io_deq_bits_uop_is_fence = io_deq_bits_uop_is_fence_0; // @[util.scala:458:7] assign io_deq_bits_uop_is_fencei = io_deq_bits_uop_is_fencei_0; // @[util.scala:458:7] assign io_deq_bits_uop_is_sfence = io_deq_bits_uop_is_sfence_0; // @[util.scala:458:7] assign io_deq_bits_uop_is_amo = io_deq_bits_uop_is_amo_0; // @[util.scala:458:7] assign io_deq_bits_uop_is_eret = io_deq_bits_uop_is_eret_0; // @[util.scala:458:7] assign io_deq_bits_uop_is_sys_pc2epc = io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7] assign io_deq_bits_uop_is_rocc = io_deq_bits_uop_is_rocc_0; // @[util.scala:458:7] assign io_deq_bits_uop_is_mov = io_deq_bits_uop_is_mov_0; // @[util.scala:458:7] assign io_deq_bits_uop_ftq_idx = io_deq_bits_uop_ftq_idx_0; // @[util.scala:458:7] assign io_deq_bits_uop_edge_inst = io_deq_bits_uop_edge_inst_0; // @[util.scala:458:7] assign io_deq_bits_uop_pc_lob = io_deq_bits_uop_pc_lob_0; // @[util.scala:458:7] assign io_deq_bits_uop_taken = io_deq_bits_uop_taken_0; // @[util.scala:458:7] assign io_deq_bits_uop_imm_rename = io_deq_bits_uop_imm_rename_0; // @[util.scala:458:7] assign io_deq_bits_uop_imm_sel = io_deq_bits_uop_imm_sel_0; // @[util.scala:458:7] assign io_deq_bits_uop_pimm = io_deq_bits_uop_pimm_0; // @[util.scala:458:7] assign io_deq_bits_uop_imm_packed = io_deq_bits_uop_imm_packed_0; // @[util.scala:458:7] assign io_deq_bits_uop_op1_sel = io_deq_bits_uop_op1_sel_0; // @[util.scala:458:7] assign io_deq_bits_uop_op2_sel = io_deq_bits_uop_op2_sel_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_ldst = io_deq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_wen = io_deq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_ren1 = io_deq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_ren2 = io_deq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_ren3 = io_deq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_swap12 = io_deq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_swap23 = io_deq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_typeTagIn = io_deq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_typeTagOut = 
io_deq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_fromint = io_deq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_toint = io_deq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_fastpipe = io_deq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_fma = io_deq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_div = io_deq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_sqrt = io_deq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_wflags = io_deq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_ctrl_vec = io_deq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7] assign io_deq_bits_uop_rob_idx = io_deq_bits_uop_rob_idx_0; // @[util.scala:458:7] assign io_deq_bits_uop_ldq_idx = io_deq_bits_uop_ldq_idx_0; // @[util.scala:458:7] assign io_deq_bits_uop_stq_idx = io_deq_bits_uop_stq_idx_0; // @[util.scala:458:7] assign io_deq_bits_uop_rxq_idx = io_deq_bits_uop_rxq_idx_0; // @[util.scala:458:7] assign io_deq_bits_uop_pdst = io_deq_bits_uop_pdst_0; // @[util.scala:458:7] assign io_deq_bits_uop_prs1 = io_deq_bits_uop_prs1_0; // @[util.scala:458:7] assign io_deq_bits_uop_prs2 = io_deq_bits_uop_prs2_0; // @[util.scala:458:7] assign io_deq_bits_uop_prs3 = io_deq_bits_uop_prs3_0; // @[util.scala:458:7] assign io_deq_bits_uop_ppred = io_deq_bits_uop_ppred_0; // @[util.scala:458:7] assign io_deq_bits_uop_prs1_busy = io_deq_bits_uop_prs1_busy_0; // @[util.scala:458:7] assign io_deq_bits_uop_prs2_busy = io_deq_bits_uop_prs2_busy_0; // @[util.scala:458:7] assign io_deq_bits_uop_prs3_busy = io_deq_bits_uop_prs3_busy_0; // @[util.scala:458:7] assign io_deq_bits_uop_ppred_busy = io_deq_bits_uop_ppred_busy_0; // @[util.scala:458:7] assign io_deq_bits_uop_stale_pdst = io_deq_bits_uop_stale_pdst_0; // @[util.scala:458:7] assign io_deq_bits_uop_exception = io_deq_bits_uop_exception_0; // @[util.scala:458:7] assign io_deq_bits_uop_exc_cause = io_deq_bits_uop_exc_cause_0; // @[util.scala:458:7] assign io_deq_bits_uop_mem_cmd = io_deq_bits_uop_mem_cmd_0; // @[util.scala:458:7] assign io_deq_bits_uop_mem_size = io_deq_bits_uop_mem_size_0; // @[util.scala:458:7] assign io_deq_bits_uop_mem_signed = io_deq_bits_uop_mem_signed_0; // @[util.scala:458:7] assign io_deq_bits_uop_uses_ldq = io_deq_bits_uop_uses_ldq_0; // @[util.scala:458:7] assign io_deq_bits_uop_uses_stq = io_deq_bits_uop_uses_stq_0; // @[util.scala:458:7] assign io_deq_bits_uop_is_unique = io_deq_bits_uop_is_unique_0; // @[util.scala:458:7] assign io_deq_bits_uop_flush_on_commit = io_deq_bits_uop_flush_on_commit_0; // @[util.scala:458:7] assign io_deq_bits_uop_csr_cmd = io_deq_bits_uop_csr_cmd_0; // @[util.scala:458:7] assign io_deq_bits_uop_ldst_is_rs1 = io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7] assign io_deq_bits_uop_ldst = io_deq_bits_uop_ldst_0; // @[util.scala:458:7] assign io_deq_bits_uop_lrs1 = io_deq_bits_uop_lrs1_0; // @[util.scala:458:7] assign io_deq_bits_uop_lrs2 = io_deq_bits_uop_lrs2_0; // @[util.scala:458:7] assign io_deq_bits_uop_lrs3 = io_deq_bits_uop_lrs3_0; // @[util.scala:458:7] assign io_deq_bits_uop_dst_rtype = io_deq_bits_uop_dst_rtype_0; // @[util.scala:458:7] assign io_deq_bits_uop_lrs1_rtype = io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7] assign io_deq_bits_uop_lrs2_rtype = io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7] assign io_deq_bits_uop_frs3_en = 
io_deq_bits_uop_frs3_en_0; // @[util.scala:458:7] assign io_deq_bits_uop_fcn_dw = io_deq_bits_uop_fcn_dw_0; // @[util.scala:458:7] assign io_deq_bits_uop_fcn_op = io_deq_bits_uop_fcn_op_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_val = io_deq_bits_uop_fp_val_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_rm = io_deq_bits_uop_fp_rm_0; // @[util.scala:458:7] assign io_deq_bits_uop_fp_typ = io_deq_bits_uop_fp_typ_0; // @[util.scala:458:7] assign io_deq_bits_uop_xcpt_pf_if = io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7] assign io_deq_bits_uop_xcpt_ae_if = io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7] assign io_deq_bits_uop_xcpt_ma_if = io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7] assign io_deq_bits_uop_bp_debug_if = io_deq_bits_uop_bp_debug_if_0; // @[util.scala:458:7] assign io_deq_bits_uop_bp_xcpt_if = io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7] assign io_deq_bits_uop_debug_fsrc = io_deq_bits_uop_debug_fsrc_0; // @[util.scala:458:7] assign io_deq_bits_uop_debug_tsrc = io_deq_bits_uop_debug_tsrc_0; // @[util.scala:458:7] assign io_deq_bits_addr = io_deq_bits_addr_0; // @[util.scala:458:7] assign io_deq_bits_data = io_deq_bits_data_0; // @[util.scala:458:7] assign io_deq_bits_is_hella = io_deq_bits_is_hella_0; // @[util.scala:458:7] assign io_deq_bits_tag_match = io_deq_bits_tag_match_0; // @[util.scala:458:7] assign io_deq_bits_old_meta_coh_state = io_deq_bits_old_meta_coh_state_0; // @[util.scala:458:7] assign io_deq_bits_old_meta_tag = io_deq_bits_old_meta_tag_0; // @[util.scala:458:7] assign io_deq_bits_way_en = io_deq_bits_way_en_0; // @[util.scala:458:7] assign io_deq_bits_sdq_id = io_deq_bits_sdq_id_0; // @[util.scala:458:7] assign io_empty = io_empty_0; // @[util.scala:458:7] endmodule
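The flattened port names in the module above (io_deq_bits_uop_fp_ctrl_fma, io_enq_bits_uop_br_mask, and so on) come from Chisel's lowering of nested bundles: each leaf field of a Decoupled payload becomes one Verilog port, with the bundle path joined by underscores. The Scala sketch below illustrates only that mechanism with invented, much smaller bundles; it is not the util.scala queue itself, and every name in it is hypothetical.

import chisel3._
import chisel3.util._

// Hypothetical, cut-down stand-ins for the real uop bundle; only meant to show
// how nested bundle fields turn into flattened Verilog port names.
class FpCtrlSketch extends Bundle {
  val fma = Bool()
  val div = Bool()
}

class UopSketch extends Bundle {
  val inst    = UInt(32.W)
  val br_mask = UInt(12.W)
  val fp_ctrl = new FpCtrlSketch
}

class EntrySketch extends Bundle {
  val uop  = new UopSketch
  val addr = UInt(40.W)
  val data = UInt(64.W)
}

class QueueSketch extends Module {
  val io = IO(new Bundle {
    val enq = Flipped(Decoupled(new EntrySketch))
    val deq = Decoupled(new EntrySketch)
  })
  // After lowering, io.deq.bits.uop.fp_ctrl.fma appears as the single port
  // io_deq_bits_uop_fp_ctrl_fma, matching the naming pattern seen above.
  io.deq <> Queue(io.enq, 2)
}

The long instantiation near the top of the module above is the same mechanism applied to the inner queue instance: every leaf field produces one .io_enq_bits_* / .io_deq_bits_* connection.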
Generate the Verilog code corresponding to the following Chisel files. File TLChannelCompactor.scala: package testchipip.serdes import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.util._ import freechips.rocketchip.tilelink._ trait TLFieldHelper { def getBodyFields(b: TLChannel): Seq[Data] = b match { case b: TLBundleA => Seq(b.mask, b.data, b.corrupt) case b: TLBundleB => Seq(b.mask, b.data, b.corrupt) case b: TLBundleC => Seq( b.data, b.corrupt) case b: TLBundleD => Seq( b.data, b.corrupt) case b: TLBundleE => Seq() } def getConstFields(b: TLChannel): Seq[Data] = b match { case b: TLBundleA => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo ) case b: TLBundleB => Seq(b.opcode, b.param, b.size, b.source, b.address ) case b: TLBundleC => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo ) case b: TLBundleD => Seq(b.opcode, b.param, b.size, b.source, b.user, b.echo, b.sink, b.denied) case b: TLBundleE => Seq( b.sink ) } def minTLPayloadWidth(b: TLChannel): Int = Seq(getBodyFields(b), getConstFields(b)).map(_.map(_.getWidth).sum).max def minTLPayloadWidth(bs: Seq[TLChannel]): Int = bs.map(b => minTLPayloadWidth(b)).max def minTLPayloadWidth(b: TLBundle): Int = minTLPayloadWidth(Seq(b.a, b.b, b.c, b.d, b.e).map(_.bits)) } class TLBeat(val beatWidth: Int) extends Bundle { val payload = UInt(beatWidth.W) val head = Bool() val tail = Bool() } abstract class TLChannelToBeat[T <: TLChannel](gen: => T, edge: TLEdge, nameSuffix: Option[String])(implicit val p: Parameters) extends Module with TLFieldHelper { override def desiredName = (Seq(this.getClass.getSimpleName) ++ nameSuffix ++ Seq(gen.params.shortName)).mkString("_") val beatWidth = minTLPayloadWidth(gen) val io = IO(new Bundle { val protocol = Flipped(Decoupled(gen)) val beat = Decoupled(new TLBeat(beatWidth)) }) def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B // convert decoupled to irrevocable val q = Module(new Queue(gen, 1, pipe=true, flow=true)) q.io.enq <> io.protocol val protocol = q.io.deq val has_body = Wire(Bool()) val body_fields = getBodyFields(protocol.bits) val const_fields = getConstFields(protocol.bits) val head = edge.first(protocol.bits, protocol.fire) val tail = edge.last(protocol.bits, protocol.fire) val body = Cat( body_fields.filter(_.getWidth > 0).map(_.asUInt)) val const = Cat(const_fields.filter(_.getWidth > 0).map(_.asUInt)) val is_body = RegInit(false.B) io.beat.valid := protocol.valid protocol.ready := io.beat.ready && (is_body || !has_body) io.beat.bits.head := head && !is_body io.beat.bits.tail := tail && (is_body || !has_body) io.beat.bits.payload := Mux(is_body, body, const) when (io.beat.fire && io.beat.bits.head) { is_body := true.B } when (io.beat.fire && io.beat.bits.tail) { is_body := false.B } } abstract class TLChannelFromBeat[T <: TLChannel](gen: => T, nameSuffix: Option[String])(implicit val p: Parameters) extends Module with TLFieldHelper { override def desiredName = (Seq(this.getClass.getSimpleName) ++ nameSuffix ++ Seq(gen.params.shortName)).mkString("_") val beatWidth = minTLPayloadWidth(gen) val io = IO(new Bundle { val protocol = Decoupled(gen) val beat = Flipped(Decoupled(new TLBeat(beatWidth))) }) // Handle size = 1 gracefully (Chisel3 empty range is broken) def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0) val protocol = Wire(Decoupled(gen)) io.protocol <> protocol val body_fields = getBodyFields(protocol.bits) 
val const_fields = getConstFields(protocol.bits) val is_const = RegInit(true.B) val const_reg = Reg(UInt(const_fields.map(_.getWidth).sum.W)) val const = Mux(io.beat.bits.head, io.beat.bits.payload, const_reg) io.beat.ready := (is_const && !io.beat.bits.tail) || protocol.ready protocol.valid := (!is_const || io.beat.bits.tail) && io.beat.valid def assign(i: UInt, sigs: Seq[Data]) = { var t = i for (s <- sigs.reverse) { s := t.asTypeOf(s.cloneType) t = t >> s.getWidth } } assign(const, const_fields) assign(io.beat.bits.payload, body_fields) when (io.beat.fire && io.beat.bits.head) { is_const := false.B; const_reg := io.beat.bits.payload } when (io.beat.fire && io.beat.bits.tail) { is_const := true.B } } class TLAToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleA(bundle), edgeIn, nameSuffix)(p) { has_body := edgeIn.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U) } class TLAFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleA(bundle), nameSuffix)(p) { when (io.beat.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) } } class TLBToBeat(edgeOut: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleB(bundle), edgeOut, nameSuffix)(p) { has_body := edgeOut.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U) } class TLBFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleB(bundle), nameSuffix)(p) { when (io.beat.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) } } class TLCToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleC(bundle), edgeIn, nameSuffix)(p) { has_body := edgeIn.hasData(protocol.bits) } class TLCFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleC(bundle), nameSuffix)(p) class TLDToBeat(edgeOut: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleD(bundle), edgeOut, nameSuffix)(p) { has_body := edgeOut.hasData(protocol.bits) } class TLDFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleD(bundle), nameSuffix)(p) class TLEToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleE(bundle), edgeIn, nameSuffix)(p) { has_body := edgeIn.hasData(protocol.bits) } class TLEFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleE(bundle), nameSuffix)(p)
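Before reading the generated module below, it helps to check where its 86-bit beat payload comes from. For TLAFromBeat the generator computes minTLPayloadWidth over channel A alone: the constant fields (opcode, param, size, source, address) and the body fields (mask, data, corrupt) are summed separately and the larger sum wins. The plain-Scala sketch below redoes that arithmetic for the a64d64s8k8z8c bundle (64-bit address and data, 8-bit source, sink, and size), assuming no user or echo fields; it is an illustration of the width calculation, not the TLFieldHelper code.

object BeatWidthSketch extends App {
  // Widths taken from the module name a64d64s8k8z8c: address=64, data=64,
  // source=8, sink=8, size=8. user/echo fields assumed absent.
  val addressBits = 64
  val dataBits    = 64
  val sourceBits  = 8
  val sizeBits    = 8

  // Channel A constant fields: opcode(3) + param(3) + size + source + address
  val aConst = 3 + 3 + sizeBits + sourceBits + addressBits    // 86
  // Channel A body fields: mask(data/8) + data + corrupt(1)
  val aBody  = dataBits / 8 + dataBits + 1                    // 73

  val beatWidth = aConst max aBody                            // 86
  println(s"const=$aConst body=$aBody beatWidth=$beatWidth")  // payload is [85:0]
}

This is why io_beat_bits_payload in the module below is [85:0], and why a constant beat fills all 86 bits while a body beat only occupies bits [72:0].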
module TLAFromBeat_serial_tl_0_a64d64s8k8z8c( // @[TLChannelCompactor.scala:112:7] input clock, // @[TLChannelCompactor.scala:112:7] input reset, // @[TLChannelCompactor.scala:112:7] input io_protocol_ready, // @[TLChannelCompactor.scala:75:14] output io_protocol_valid, // @[TLChannelCompactor.scala:75:14] output [2:0] io_protocol_bits_opcode, // @[TLChannelCompactor.scala:75:14] output [2:0] io_protocol_bits_param, // @[TLChannelCompactor.scala:75:14] output [7:0] io_protocol_bits_size, // @[TLChannelCompactor.scala:75:14] output [7:0] io_protocol_bits_source, // @[TLChannelCompactor.scala:75:14] output [63:0] io_protocol_bits_address, // @[TLChannelCompactor.scala:75:14] output [7:0] io_protocol_bits_mask, // @[TLChannelCompactor.scala:75:14] output [63:0] io_protocol_bits_data, // @[TLChannelCompactor.scala:75:14] output io_protocol_bits_corrupt, // @[TLChannelCompactor.scala:75:14] output io_beat_ready, // @[TLChannelCompactor.scala:75:14] input io_beat_valid, // @[TLChannelCompactor.scala:75:14] input [85:0] io_beat_bits_payload, // @[TLChannelCompactor.scala:75:14] input io_beat_bits_head, // @[TLChannelCompactor.scala:75:14] input io_beat_bits_tail // @[TLChannelCompactor.scala:75:14] ); wire io_protocol_ready_0 = io_protocol_ready; // @[TLChannelCompactor.scala:112:7] wire io_beat_valid_0 = io_beat_valid; // @[TLChannelCompactor.scala:112:7] wire [85:0] io_beat_bits_payload_0 = io_beat_bits_payload; // @[TLChannelCompactor.scala:112:7] wire io_beat_bits_head_0 = io_beat_bits_head; // @[TLChannelCompactor.scala:112:7] wire io_beat_bits_tail_0 = io_beat_bits_tail; // @[TLChannelCompactor.scala:112:7] wire [7:0] _io_protocol_bits_mask_T = 8'hFF; // @[TLChannelCompactor.scala:113:55] wire protocol_ready = io_protocol_ready_0; // @[TLChannelCompactor.scala:83:22, :112:7] wire protocol_valid; // @[TLChannelCompactor.scala:83:22] wire [2:0] protocol_bits_opcode; // @[TLChannelCompactor.scala:83:22] wire [2:0] protocol_bits_param; // @[TLChannelCompactor.scala:83:22] wire [7:0] protocol_bits_size; // @[TLChannelCompactor.scala:83:22] wire [7:0] protocol_bits_source; // @[TLChannelCompactor.scala:83:22] wire [63:0] protocol_bits_address; // @[TLChannelCompactor.scala:83:22] wire [63:0] protocol_bits_data; // @[TLChannelCompactor.scala:83:22] wire protocol_bits_corrupt; // @[TLChannelCompactor.scala:83:22] wire _io_beat_ready_T_2; // @[TLChannelCompactor.scala:91:53] wire [2:0] io_protocol_bits_opcode_0; // @[TLChannelCompactor.scala:112:7] wire [2:0] io_protocol_bits_param_0; // @[TLChannelCompactor.scala:112:7] wire [7:0] io_protocol_bits_size_0; // @[TLChannelCompactor.scala:112:7] wire [7:0] io_protocol_bits_source_0; // @[TLChannelCompactor.scala:112:7] wire [63:0] io_protocol_bits_address_0; // @[TLChannelCompactor.scala:112:7] wire [7:0] io_protocol_bits_mask_0; // @[TLChannelCompactor.scala:112:7] wire [63:0] io_protocol_bits_data_0; // @[TLChannelCompactor.scala:112:7] wire io_protocol_bits_corrupt_0; // @[TLChannelCompactor.scala:112:7] wire io_protocol_valid_0; // @[TLChannelCompactor.scala:112:7] wire io_beat_ready_0; // @[TLChannelCompactor.scala:112:7] wire _protocol_valid_T_2; // @[TLChannelCompactor.scala:92:54] assign io_protocol_valid_0 = protocol_valid; // @[TLChannelCompactor.scala:83:22, :112:7] wire [2:0] _protocol_bits_opcode_WIRE; // @[TLChannelCompactor.scala:97:22] assign io_protocol_bits_opcode_0 = protocol_bits_opcode; // @[TLChannelCompactor.scala:83:22, :112:7] wire [2:0] _protocol_bits_param_WIRE; // @[TLChannelCompactor.scala:97:22] assign 
io_protocol_bits_param_0 = protocol_bits_param; // @[TLChannelCompactor.scala:83:22, :112:7] wire [7:0] _protocol_bits_size_WIRE; // @[TLChannelCompactor.scala:97:22] assign io_protocol_bits_size_0 = protocol_bits_size; // @[TLChannelCompactor.scala:83:22, :112:7] wire [7:0] _protocol_bits_source_WIRE; // @[TLChannelCompactor.scala:97:22] assign io_protocol_bits_source_0 = protocol_bits_source; // @[TLChannelCompactor.scala:83:22, :112:7] wire [63:0] _protocol_bits_address_WIRE; // @[TLChannelCompactor.scala:97:22] assign io_protocol_bits_address_0 = protocol_bits_address; // @[TLChannelCompactor.scala:83:22, :112:7] wire [7:0] _protocol_bits_mask_WIRE; // @[TLChannelCompactor.scala:97:22] wire [63:0] _protocol_bits_data_WIRE; // @[TLChannelCompactor.scala:97:22] assign io_protocol_bits_data_0 = protocol_bits_data; // @[TLChannelCompactor.scala:83:22, :112:7] wire _protocol_bits_corrupt_WIRE; // @[TLChannelCompactor.scala:97:22] assign io_protocol_bits_corrupt_0 = protocol_bits_corrupt; // @[TLChannelCompactor.scala:83:22, :112:7] wire [7:0] protocol_bits_mask; // @[TLChannelCompactor.scala:83:22] reg is_const; // @[TLChannelCompactor.scala:88:25] reg [85:0] const_reg; // @[TLChannelCompactor.scala:89:22] wire [85:0] const_0 = io_beat_bits_head_0 ? io_beat_bits_payload_0 : const_reg; // @[TLChannelCompactor.scala:89:22, :90:18, :112:7] wire _io_beat_ready_T = ~io_beat_bits_tail_0; // @[TLChannelCompactor.scala:91:33, :112:7] wire _io_beat_ready_T_1 = is_const & _io_beat_ready_T; // @[TLChannelCompactor.scala:88:25, :91:{30,33}] assign _io_beat_ready_T_2 = _io_beat_ready_T_1 | protocol_ready; // @[TLChannelCompactor.scala:83:22, :91:{30,53}] assign io_beat_ready_0 = _io_beat_ready_T_2; // @[TLChannelCompactor.scala:91:53, :112:7] wire _protocol_valid_T = ~is_const; // @[TLChannelCompactor.scala:88:25, :92:22] wire _protocol_valid_T_1 = _protocol_valid_T | io_beat_bits_tail_0; // @[TLChannelCompactor.scala:92:{22,32}, :112:7] assign _protocol_valid_T_2 = _protocol_valid_T_1 & io_beat_valid_0; // @[TLChannelCompactor.scala:92:{32,54}, :112:7] assign protocol_valid = _protocol_valid_T_2; // @[TLChannelCompactor.scala:83:22, :92:54] assign protocol_bits_address = _protocol_bits_address_WIRE; // @[TLChannelCompactor.scala:83:22, :97:22] assign _protocol_bits_address_WIRE = const_0[63:0]; // @[TLChannelCompactor.scala:90:18, :97:22] assign protocol_bits_source = _protocol_bits_source_WIRE; // @[TLChannelCompactor.scala:83:22, :97:22] assign _protocol_bits_source_WIRE = const_0[71:64]; // @[TLChannelCompactor.scala:90:18, :97:22, :98:13] assign protocol_bits_size = _protocol_bits_size_WIRE; // @[TLChannelCompactor.scala:83:22, :97:22] assign _protocol_bits_size_WIRE = const_0[79:72]; // @[TLChannelCompactor.scala:90:18, :97:22, :98:13] assign protocol_bits_param = _protocol_bits_param_WIRE; // @[TLChannelCompactor.scala:83:22, :97:22] assign _protocol_bits_param_WIRE = const_0[82:80]; // @[TLChannelCompactor.scala:90:18, :97:22, :98:13] assign _protocol_bits_opcode_WIRE = const_0[85:83]; // @[TLChannelCompactor.scala:90:18, :97:22, :98:13] assign protocol_bits_opcode = _protocol_bits_opcode_WIRE; // @[TLChannelCompactor.scala:83:22, :97:22] assign protocol_bits_corrupt = _protocol_bits_corrupt_WIRE; // @[TLChannelCompactor.scala:83:22, :97:22] assign _protocol_bits_corrupt_WIRE = io_beat_bits_payload_0[0]; // @[TLChannelCompactor.scala:97:22, :112:7] assign protocol_bits_data = _protocol_bits_data_WIRE; // @[TLChannelCompactor.scala:83:22, :97:22] assign _protocol_bits_data_WIRE = 
io_beat_bits_payload_0[64:1]; // @[TLChannelCompactor.scala:97:22, :98:13, :112:7] assign protocol_bits_mask = _protocol_bits_mask_WIRE; // @[TLChannelCompactor.scala:83:22, :97:22] assign _protocol_bits_mask_WIRE = io_beat_bits_payload_0[72:65]; // @[TLChannelCompactor.scala:97:22, :98:13, :112:7] assign io_protocol_bits_mask_0 = io_beat_bits_head_0 ? 8'hFF : protocol_bits_mask; // @[TLChannelCompactor.scala:83:22, :84:15, :112:7, :113:{28,52}] wire _T_12 = io_beat_ready_0 & io_beat_valid_0; // @[Decoupled.scala:51:35] wire _T_11 = _T_12 & io_beat_bits_head_0; // @[Decoupled.scala:51:35] always @(posedge clock) begin // @[TLChannelCompactor.scala:112:7] if (reset) // @[TLChannelCompactor.scala:112:7] is_const <= 1'h1; // @[TLChannelCompactor.scala:88:25] else // @[TLChannelCompactor.scala:112:7] is_const <= _T_12 & io_beat_bits_tail_0 | ~_T_11 & is_const; // @[Decoupled.scala:51:35] if (_T_11) // @[TLChannelCompactor.scala:104:22] const_reg <= io_beat_bits_payload_0; // @[TLChannelCompactor.scala:89:22, :112:7] end // always @(posedge) assign io_protocol_valid = io_protocol_valid_0; // @[TLChannelCompactor.scala:112:7] assign io_protocol_bits_opcode = io_protocol_bits_opcode_0; // @[TLChannelCompactor.scala:112:7] assign io_protocol_bits_param = io_protocol_bits_param_0; // @[TLChannelCompactor.scala:112:7] assign io_protocol_bits_size = io_protocol_bits_size_0; // @[TLChannelCompactor.scala:112:7] assign io_protocol_bits_source = io_protocol_bits_source_0; // @[TLChannelCompactor.scala:112:7] assign io_protocol_bits_address = io_protocol_bits_address_0; // @[TLChannelCompactor.scala:112:7] assign io_protocol_bits_mask = io_protocol_bits_mask_0; // @[TLChannelCompactor.scala:112:7] assign io_protocol_bits_data = io_protocol_bits_data_0; // @[TLChannelCompactor.scala:112:7] assign io_protocol_bits_corrupt = io_protocol_bits_corrupt_0; // @[TLChannelCompactor.scala:112:7] assign io_beat_ready = io_beat_ready_0; // @[TLChannelCompactor.scala:112:7] endmodule
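A detail worth noting in the module above is the order in which the constant payload is unpacked. TLChannelFromBeat.assign walks the field list in reverse and shifts the payload right by each field's width, so the last constant field (address) lands in the low bits. The plain-Scala sketch below recomputes those offsets from the channel-A field widths; the (name, width) pairs are an assumption matching the 86-bit bundle above, not the diplomacy code itself.

object ConstSliceSketch extends App {
  // getConstFields order for channel A (user/echo assumed empty), high to low:
  val constFields = Seq(
    "opcode"  -> 3,
    "param"   -> 3,
    "size"    -> 8,
    "source"  -> 8,
    "address" -> 64)

  var lo = 0
  for ((name, w) <- constFields.reverse) {   // reverse, like assign()
    println(f"$name%-8s -> payload[${lo + w - 1}:$lo]")
    lo += w
  }
  // Prints address [63:0], source [71:64], size [79:72], param [82:80],
  // opcode [85:83] -- exactly the slices of const_0 in the module above.
}

The body fields follow the same rule, which is why corrupt sits at payload[0], data at payload[64:1], and mask at payload[72:65] in the generated assigns.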
Generate the Verilog code corresponding to the following Chisel files. File Metadata.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.constants.MemoryOpConstants import freechips.rocketchip.util._ object ClientStates { val width = 2 def Nothing = 0.U(width.W) def Branch = 1.U(width.W) def Trunk = 2.U(width.W) def Dirty = 3.U(width.W) def hasReadPermission(state: UInt): Bool = state > Nothing def hasWritePermission(state: UInt): Bool = state > Branch } object MemoryOpCategories extends MemoryOpConstants { def wr = Cat(true.B, true.B) // Op actually writes def wi = Cat(false.B, true.B) // Future op will write def rd = Cat(false.B, false.B) // Op only reads def categorize(cmd: UInt): UInt = { val cat = Cat(isWrite(cmd), isWriteIntent(cmd)) //assert(cat.isOneOf(wr,wi,rd), "Could not categorize command.") cat } } /** Stores the client-side coherence information, * such as permissions on the data and whether the data is dirty. * Its API can be used to make TileLink messages in response to * memory operations, cache control oeprations, or Probe messages. */ class ClientMetadata extends Bundle { /** Actual state information stored in this bundle */ val state = UInt(ClientStates.width.W) /** Metadata equality */ def ===(rhs: UInt): Bool = state === rhs def ===(rhs: ClientMetadata): Bool = state === rhs.state def =/=(rhs: ClientMetadata): Bool = !this.===(rhs) /** Is the block's data present in this cache */ def isValid(dummy: Int = 0): Bool = state > ClientStates.Nothing /** Determine whether this cmd misses, and the new state (on hit) or param to be sent (on miss) */ private def growStarter(cmd: UInt): (Bool, UInt) = { import MemoryOpCategories._ import TLPermissions._ import ClientStates._ val c = categorize(cmd) MuxTLookup(Cat(c, state), (false.B, 0.U), Seq( //(effect, am now) -> (was a hit, next) Cat(rd, Dirty) -> (true.B, Dirty), Cat(rd, Trunk) -> (true.B, Trunk), Cat(rd, Branch) -> (true.B, Branch), Cat(wi, Dirty) -> (true.B, Dirty), Cat(wi, Trunk) -> (true.B, Trunk), Cat(wr, Dirty) -> (true.B, Dirty), Cat(wr, Trunk) -> (true.B, Dirty), //(effect, am now) -> (was a miss, param) Cat(rd, Nothing) -> (false.B, NtoB), Cat(wi, Branch) -> (false.B, BtoT), Cat(wi, Nothing) -> (false.B, NtoT), Cat(wr, Branch) -> (false.B, BtoT), Cat(wr, Nothing) -> (false.B, NtoT))) } /** Determine what state to go to after miss based on Grant param * For now, doesn't depend on state (which may have been Probed). */ private def growFinisher(cmd: UInt, param: UInt): UInt = { import MemoryOpCategories._ import TLPermissions._ import ClientStates._ val c = categorize(cmd) //assert(c === rd || param === toT, "Client was expecting trunk permissions.") MuxLookup(Cat(c, param), Nothing)(Seq( //(effect param) -> (next) Cat(rd, toB) -> Branch, Cat(rd, toT) -> Trunk, Cat(wi, toT) -> Trunk, Cat(wr, toT) -> Dirty)) } /** Does this cache have permissions on this block sufficient to perform op, * and what to do next (Acquire message param or updated metadata). 
*/ def onAccess(cmd: UInt): (Bool, UInt, ClientMetadata) = { val r = growStarter(cmd) (r._1, r._2, ClientMetadata(r._2)) } /** Does a secondary miss on the block require another Acquire message */ def onSecondaryAccess(first_cmd: UInt, second_cmd: UInt): (Bool, Bool, UInt, ClientMetadata, UInt) = { import MemoryOpCategories._ val r1 = growStarter(first_cmd) val r2 = growStarter(second_cmd) val needs_second_acq = isWriteIntent(second_cmd) && !isWriteIntent(first_cmd) val hit_again = r1._1 && r2._1 val dirties = categorize(second_cmd) === wr val biggest_grow_param = Mux(dirties, r2._2, r1._2) val dirtiest_state = ClientMetadata(biggest_grow_param) val dirtiest_cmd = Mux(dirties, second_cmd, first_cmd) (needs_second_acq, hit_again, biggest_grow_param, dirtiest_state, dirtiest_cmd) } /** Metadata change on a returned Grant */ def onGrant(cmd: UInt, param: UInt): ClientMetadata = ClientMetadata(growFinisher(cmd, param)) /** Determine what state to go to based on Probe param */ private def shrinkHelper(param: UInt): (Bool, UInt, UInt) = { import ClientStates._ import TLPermissions._ MuxTLookup(Cat(param, state), (false.B, 0.U, 0.U), Seq( //(wanted, am now) -> (hasDirtyData resp, next) Cat(toT, Dirty) -> (true.B, TtoT, Trunk), Cat(toT, Trunk) -> (false.B, TtoT, Trunk), Cat(toT, Branch) -> (false.B, BtoB, Branch), Cat(toT, Nothing) -> (false.B, NtoN, Nothing), Cat(toB, Dirty) -> (true.B, TtoB, Branch), Cat(toB, Trunk) -> (false.B, TtoB, Branch), // Policy: Don't notify on clean downgrade Cat(toB, Branch) -> (false.B, BtoB, Branch), Cat(toB, Nothing) -> (false.B, NtoN, Nothing), Cat(toN, Dirty) -> (true.B, TtoN, Nothing), Cat(toN, Trunk) -> (false.B, TtoN, Nothing), // Policy: Don't notify on clean downgrade Cat(toN, Branch) -> (false.B, BtoN, Nothing), // Policy: Don't notify on clean downgrade Cat(toN, Nothing) -> (false.B, NtoN, Nothing))) } /** Translate cache control cmds into Probe param */ private def cmdToPermCap(cmd: UInt): UInt = { import MemoryOpCategories._ import TLPermissions._ MuxLookup(cmd, toN)(Seq( M_FLUSH -> toN, M_PRODUCE -> toB, M_CLEAN -> toT)) } def onCacheControl(cmd: UInt): (Bool, UInt, ClientMetadata) = { val r = shrinkHelper(cmdToPermCap(cmd)) (r._1, r._2, ClientMetadata(r._3)) } def onProbe(param: UInt): (Bool, UInt, ClientMetadata) = { val r = shrinkHelper(param) (r._1, r._2, ClientMetadata(r._3)) } } /** Factories for ClientMetadata, including on reset */ object ClientMetadata { def apply(perm: UInt) = { val meta = Wire(new ClientMetadata) meta.state := perm meta } def onReset = ClientMetadata(ClientStates.Nothing) def maximum = ClientMetadata(ClientStates.Dirty) } File HellaCache.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. 
package freechips.rocketchip.rocket import chisel3.{dontTouch, _} import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.bundlebridge._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.amba.AMBAProtField import freechips.rocketchip.diplomacy.{IdRange, TransferSizes, RegionType} import freechips.rocketchip.tile.{L1CacheParams, HasL1CacheParameters, HasCoreParameters, CoreBundle, HasNonDiplomaticTileParameters, BaseTile, HasTileParameters} import freechips.rocketchip.tilelink.{TLMasterParameters, TLClientNode, TLMasterPortParameters, TLEdgeOut, TLWidthWidget, TLFIFOFixer, ClientMetadata} import freechips.rocketchip.util.{Code, RandomReplacement, ParameterizedBundle} import freechips.rocketchip.util.{BooleanToAugmentedBoolean, IntToAugmentedInt} import scala.collection.mutable.ListBuffer case class DCacheParams( nSets: Int = 64, nWays: Int = 4, rowBits: Int = 64, subWordBits: Option[Int] = None, replacementPolicy: String = "random", nTLBSets: Int = 1, nTLBWays: Int = 32, nTLBBasePageSectors: Int = 4, nTLBSuperpages: Int = 4, tagECC: Option[String] = None, dataECC: Option[String] = None, dataECCBytes: Int = 1, nMSHRs: Int = 1, nSDQ: Int = 17, nRPQ: Int = 16, nMMIOs: Int = 1, blockBytes: Int = 64, separateUncachedResp: Boolean = false, acquireBeforeRelease: Boolean = false, pipelineWayMux: Boolean = false, clockGate: Boolean = false, scratch: Option[BigInt] = None) extends L1CacheParams { def tagCode: Code = Code.fromString(tagECC) def dataCode: Code = Code.fromString(dataECC) def dataScratchpadBytes: Int = scratch.map(_ => nSets*blockBytes).getOrElse(0) def replacement = new RandomReplacement(nWays) def silentDrop: Boolean = !acquireBeforeRelease require((!scratch.isDefined || nWays == 1), "Scratchpad only allowed in direct-mapped cache.") require((!scratch.isDefined || nMSHRs == 0), "Scratchpad only allowed in blocking cache.") if (scratch.isEmpty) require(isPow2(nSets), s"nSets($nSets) must be pow2") } trait HasL1HellaCacheParameters extends HasL1CacheParameters with HasCoreParameters { val cacheParams = tileParams.dcache.get val cfg = cacheParams def wordBits = coreDataBits def wordBytes = coreDataBytes def subWordBits = cacheParams.subWordBits.getOrElse(wordBits) def subWordBytes = subWordBits / 8 def wordOffBits = log2Up(wordBytes) def beatBytes = cacheBlockBytes / cacheDataBeats def beatWords = beatBytes / wordBytes def beatOffBits = log2Up(beatBytes) def idxMSB = untagBits-1 def idxLSB = blockOffBits def offsetmsb = idxLSB-1 def offsetlsb = wordOffBits def rowWords = rowBits/wordBits def doNarrowRead = coreDataBits * nWays % rowBits == 0 def eccBytes = cacheParams.dataECCBytes val eccBits = cacheParams.dataECCBytes * 8 val encBits = cacheParams.dataCode.width(eccBits) val encWordBits = encBits * (wordBits / eccBits) def encDataBits = cacheParams.dataCode.width(coreDataBits) // NBDCache only def encRowBits = encDataBits*rowWords def lrscCycles = coreParams.lrscCycles // ISA requires 16-insn LRSC sequences to succeed def lrscBackoff = 3 // disallow LRSC reacquisition briefly def blockProbeAfterGrantCycles = 8 // give the processor some time to issue a request after a grant def nIOMSHRs = cacheParams.nMMIOs def maxUncachedInFlight = cacheParams.nMMIOs def dataScratchpadSize = cacheParams.dataScratchpadBytes require(rowBits >= coreDataBits, s"rowBits($rowBits) < coreDataBits($coreDataBits)") if (!usingDataScratchpad) require(rowBits == cacheDataBits, s"rowBits($rowBits) != cacheDataBits($cacheDataBits)") // would need offset 
addr for puts if data width < xlen require(xLen <= cacheDataBits, s"xLen($xLen) > cacheDataBits($cacheDataBits)") } abstract class L1HellaCacheModule(implicit val p: Parameters) extends Module with HasL1HellaCacheParameters abstract class L1HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p) with HasL1HellaCacheParameters /** Bundle definitions for HellaCache interfaces */ trait HasCoreMemOp extends HasL1HellaCacheParameters { val addr = UInt(coreMaxAddrBits.W) val idx = (usingVM && untagBits > pgIdxBits).option(UInt(coreMaxAddrBits.W)) val tag = UInt((coreParams.dcacheReqTagBits + log2Ceil(dcacheArbPorts)).W) val cmd = UInt(M_SZ.W) val size = UInt(log2Ceil(coreDataBytes.log2 + 1).W) val signed = Bool() val dprv = UInt(PRV.SZ.W) val dv = Bool() } trait HasCoreData extends HasCoreParameters { val data = UInt(coreDataBits.W) val mask = UInt(coreDataBytes.W) } class HellaCacheReqInternal(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp { val phys = Bool() val no_resp = Bool() // The dcache may omit generating a response for this request val no_alloc = Bool() val no_xcpt = Bool() } class HellaCacheReq(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData class HellaCacheResp(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp with HasCoreData { val replay = Bool() val has_data = Bool() val data_word_bypass = UInt(coreDataBits.W) val data_raw = UInt(coreDataBits.W) val store_data = UInt(coreDataBits.W) } class AlignmentExceptions extends Bundle { val ld = Bool() val st = Bool() } class HellaCacheExceptions extends Bundle { val ma = new AlignmentExceptions val pf = new AlignmentExceptions val gf = new AlignmentExceptions val ae = new AlignmentExceptions } class HellaCacheWriteData(implicit p: Parameters) extends CoreBundle()(p) with HasCoreData class HellaCachePerfEvents extends Bundle { val acquire = Bool() val release = Bool() val grant = Bool() val tlbMiss = Bool() val blocked = Bool() val canAcceptStoreThenLoad = Bool() val canAcceptStoreThenRMW = Bool() val canAcceptLoadThenLoad = Bool() val storeBufferEmptyAfterLoad = Bool() val storeBufferEmptyAfterStore = Bool() } // interface between D$ and processor/DTLB class HellaCacheIO(implicit p: Parameters) extends CoreBundle()(p) { val req = Decoupled(new HellaCacheReq) val s1_kill = Output(Bool()) // kill previous cycle's req val s1_data = Output(new HellaCacheWriteData()) // data for previous cycle's req val s2_nack = Input(Bool()) // req from two cycles ago is rejected val s2_nack_cause_raw = Input(Bool()) // reason for nack is store-load RAW hazard (performance hint) val s2_kill = Output(Bool()) // kill req from two cycles ago val s2_uncached = Input(Bool()) // advisory signal that the access is MMIO val s2_paddr = Input(UInt(paddrBits.W)) // translated address val resp = Flipped(Valid(new HellaCacheResp)) val replay_next = Input(Bool()) val s2_xcpt = Input(new HellaCacheExceptions) val s2_gpa = Input(UInt(vaddrBitsExtended.W)) val s2_gpa_is_pte = Input(Bool()) val uncached_resp = tileParams.dcache.get.separateUncachedResp.option(Flipped(Decoupled(new HellaCacheResp))) val ordered = Input(Bool()) val store_pending = Input(Bool()) // there is a store in a store buffer somewhere val perf = Input(new HellaCachePerfEvents()) val keep_clock_enabled = Output(Bool()) // should D$ avoid clock-gating itself? val clock_enabled = Input(Bool()) // is D$ currently being clocked? 
} /** Base classes for Diplomatic TL2 HellaCaches */ abstract class HellaCache(tileId: Int)(implicit p: Parameters) extends LazyModule with HasNonDiplomaticTileParameters { protected val cfg = tileParams.dcache.get protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1( name = s"Core ${tileId} DCache", sourceId = IdRange(0, 1 max cfg.nMSHRs), supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes)))) protected def mmioClientParameters = Seq(TLMasterParameters.v1( name = s"Core ${tileId} DCache MMIO", sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs), requestFifo = true)) def firstMMIO = (cacheClientParameters.map(_.sourceId.end) :+ 0).max val node = TLClientNode(Seq(TLMasterPortParameters.v1( clients = cacheClientParameters ++ mmioClientParameters, minLatency = 1, requestFields = tileParams.core.useVM.option(Seq()).getOrElse(Seq(AMBAProtField()))))) val hartIdSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]()) val mmioAddressPrefixSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]()) val module: HellaCacheModule def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireB || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT) def canSupportCFlushLine = !usingVM || cfg.blockBytes * cfg.nSets <= (1 << pgIdxBits) require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$") } class HellaCacheBundle(implicit p: Parameters) extends CoreBundle()(p) { val cpu = Flipped(new HellaCacheIO) val ptw = new TLBPTWIO() val errors = new DCacheErrors val tlb_port = new DCacheTLBPort } class HellaCacheModule(outer: HellaCache) extends LazyModuleImp(outer) with HasL1HellaCacheParameters { implicit val edge: TLEdgeOut = outer.node.edges.out(0) val (tl_out, _) = outer.node.out(0) val io = IO(new HellaCacheBundle) val io_hartid = outer.hartIdSinkNodeOpt.map(_.bundle) val io_mmio_address_prefix = outer.mmioAddressPrefixSinkNodeOpt.map(_.bundle) dontTouch(io.cpu.resp) // Users like to monitor these fields even if the core ignores some signals dontTouch(io.cpu.s1_data) require(rowBits == edge.bundle.dataBits) private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile) fifoManagers.foreach { m => require (m.fifoId == fifoManagers.head.fifoId, s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees\n"+ s"${m.nodePath.map(_.name)}\nversus\n${fifoManagers.head.nodePath.map(_.name)}") } } /** Support overriding which HellaCache is instantiated */ case object BuildHellaCache extends Field[BaseTile => Parameters => HellaCache](HellaCacheFactory.apply) object HellaCacheFactory { def apply(tile: BaseTile)(p: Parameters): HellaCache = { if (tile.tileParams.dcache.get.nMSHRs == 0) new DCache(tile.tileId, tile.crossing)(p) else new NonBlockingDCache(tile.tileId)(p) } } /** Mix-ins for constructing tiles that have a HellaCache */ trait HasHellaCache { this: BaseTile => val module: HasHellaCacheModule implicit val p: Parameters var nDCachePorts = 0 lazy val dcache: HellaCache = LazyModule(p(BuildHellaCache)(this)(p)) tlMasterXbar.node := TLWidthWidget(tileParams.dcache.get.rowBits/8) := dcache.node dcache.hartIdSinkNodeOpt.map { _ := hartIdNexusNode } dcache.mmioAddressPrefixSinkNodeOpt.map { _ := mmioAddressPrefixNexusNode } InModuleBody { dcache.module.io.tlb_port := DontCare } } trait HasHellaCacheModule { val outer: HasHellaCache with HasTileParameters implicit val p: Parameters val 
dcachePorts = ListBuffer[HellaCacheIO]() val dcacheArb = Module(new HellaCacheArbiter(outer.nDCachePorts)(outer.p)) outer.dcache.module.io.cpu <> dcacheArb.io.mem } /** Metadata array used for all HellaCaches */ class L1Metadata(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val coh = new ClientMetadata val tag = UInt(tagBits.W) } object L1Metadata { def apply(tag: Bits, coh: ClientMetadata)(implicit p: Parameters) = { val meta = Wire(new L1Metadata) meta.tag := tag meta.coh := coh meta } } class L1MetaReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val idx = UInt(idxBits.W) val way_en = UInt(nWays.W) val tag = UInt(tagBits.W) } class L1MetaWriteReq(implicit p: Parameters) extends L1MetaReadReq()(p) { val data = new L1Metadata } class L1MetadataArray[T <: L1Metadata](onReset: () => T)(implicit p: Parameters) extends L1HellaCacheModule()(p) { val rstVal = onReset() val io = IO(new Bundle { val read = Flipped(Decoupled(new L1MetaReadReq)) val write = Flipped(Decoupled(new L1MetaWriteReq)) val resp = Output(Vec(nWays, rstVal.cloneType)) }) val rst_cnt = RegInit(0.U(log2Up(nSets+1).W)) val rst = rst_cnt < nSets.U val waddr = Mux(rst, rst_cnt, io.write.bits.idx) val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt val wmask = Mux(rst || (nWays == 1).B, (-1).S, io.write.bits.way_en.asSInt).asBools val rmask = Mux(rst || (nWays == 1).B, (-1).S, io.read.bits.way_en.asSInt).asBools when (rst) { rst_cnt := rst_cnt+1.U } val metabits = rstVal.getWidth val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W))) val wen = rst || io.write.valid when (wen) { tag_array.write(waddr, VecInit.fill(nWays)(wdata), wmask) } io.resp := tag_array.read(io.read.bits.idx, io.read.fire).map(_.asTypeOf(chiselTypeOf(rstVal))) io.read.ready := !wen // so really this could be a 6T RAM io.write.ready := !rst } File TagArray.scala: package shuttle.dmem import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.rocket._ import freechips.rocketchip.tile.{TileKey} class L1MetadataArrayBank[T <: L1Metadata](onReset: () => T, nSets: Int)(implicit p: Parameters) extends L1HellaCacheModule()(p) { val rstVal = onReset() val io = IO(new Bundle { val read = Flipped(Decoupled(new L1MetaReadReq)) val write = Flipped(Decoupled(new L1MetaWriteReq)) val resp = Vec(nWays, Output(rstVal.cloneType)) }) val rst_cnt = RegInit(0.U(log2Up(nSets+1).W)) val rst = rst_cnt < nSets.U val waddr = Mux(rst, rst_cnt, io.write.bits.idx) val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt val wmask = Mux(rst || (nWays == 1).B, ~(0.U(nWays.W)), io.write.bits.way_en).asBools val rmask = Mux(rst || (nWays == 1).B, ~(0.U(nWays.W)), io.read.bits.way_en).asBools when (rst) { rst_cnt := rst_cnt + 1.U } val metabits = rstVal.getWidth val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W))) val wen = Wire(Bool()) val ren = Wire(Bool()) val stall_ctr = RegInit(0.U(2.W)) val stall_read = stall_ctr === ~(0.U(2.W)) val force_stall = WireInit(false.B) val rbuf_valid = RegInit(false.B) val rbuf_idx = Reg(UInt(log2Ceil(nSets).W)) val rbuf = Reg(io.resp.cloneType) val forward_from_rbuf = WireInit(false.B) when (io.read.valid && force_stall) { stall_ctr := stall_ctr + 1.U } .otherwise { stall_ctr := 0.U } when (rst) { wen := true.B ren := false.B io.read.ready := false.B io.write.ready := false.B } .otherwise { when (io.read.valid && !stall_read) { when 
(io.read.bits.idx === rbuf_idx && rbuf_valid) { forward_from_rbuf := true.B ren := false.B wen := io.write.valid io.read.ready := true.B io.write.ready := true.B } .otherwise { ren := true.B wen := false.B io.read.ready := true.B io.write.ready := false.B force_stall := io.write.valid } } .otherwise { ren := false.B wen := io.write.valid io.read.ready := false.B io.write.ready := true.B } } val s1_read_idx = RegEnable(io.read.bits.idx, io.read.valid) when (RegNext(io.read.fire && !forward_from_rbuf) && !(io.write.valid && io.write.bits.idx === s1_read_idx)) { rbuf_valid := true.B rbuf_idx := s1_read_idx rbuf := io.resp } when (io.write.valid && io.write.bits.idx === rbuf_idx) { rbuf_valid := false.B } when (wen) { tag_array.write(waddr, VecInit(Seq.fill(nWays) { wdata }), wmask) } io.resp := tag_array.read(io.read.bits.idx, ren && !wen).map(_.asTypeOf(rstVal)) when (RegNext(forward_from_rbuf)) { io.resp := RegEnable(rbuf, forward_from_rbuf) } } class L1MetadataArrayBanked[T <: L1Metadata](onReset: () => T, nBanks: Int, nReadPorts: Int, nWritePorts: Int)(implicit p: Parameters) extends L1HellaCacheModule()(p) { val rstVal = onReset() val io = IO(new Bundle { val read = Flipped(Vec(nReadPorts, Decoupled(new L1MetaReadReq))) val write = Flipped(Vec(nWritePorts, Decoupled(new L1MetaWriteReq))) val resp = Vec(nReadPorts, Vec(nWays, Output(rstVal.cloneType))) }) // No banking, duplicate the arrays if (nBanks == 0) { val arrays = Seq.fill(nReadPorts) { Module(new L1MetadataArrayBank(onReset, nSets)) } (io.read zip arrays).map { case (r,a) => a.io.read <> r } (io.resp zip arrays).map { case (r,a) => r <> a.io.resp } val write_arb = Module(new Arbiter(new L1MetaWriteReq, nWritePorts)) write_arb.io.in <> io.write write_arb.io.out.ready := arrays.map(_.io.write.ready).reduce(_&&_) arrays.foreach(_.io.write.bits := write_arb.io.out.bits) arrays.zipWithIndex.foreach { case (a,i) => a.io.write.valid := write_arb.io.out.valid && arrays.patch(i, Nil, 1).map(_.io.write.ready).reduce(_&&_) } } else if (nBanks == 1) { val array = Module(new L1MetadataArrayBank(onReset, nSets)) val arb = Module(new Arbiter(new L1MetaReadReq, nReadPorts)) arb.io.in <> io.read array.io.read <> arb.io.out io.resp.foreach(_ <> array.io.resp) val write_arb = Module(new Arbiter(new L1MetaWriteReq, nWritePorts)) write_arb.io.in <> io.write array.io.write <> write_arb.io.out } else { val bankBits = log2Ceil(nBanks) val arrays = Seq.fill(nBanks) { Module(new L1MetadataArrayBank(onReset, nSets/nBanks)) } val arbs = Seq.fill(nBanks) { Module(new Arbiter(new L1MetaReadReq, nReadPorts)) } val write_arbs = Seq.fill(nBanks) { Module(new Arbiter(new L1MetaWriteReq, nWritePorts)) } io.read.foreach(_.ready := false.B) io.resp.foreach(_ := DontCare) for (i <- 0 until nReadPorts) { for (b <- 0 until nBanks) { arbs(b).io.in(i).valid := io.read(i).valid && io.read(i).bits.idx(bankBits-1,0) === b.U arbs(b).io.in(i).bits := io.read(i).bits arbs(b).io.in(i).bits.idx := io.read(i).bits.idx >> bankBits when (io.read(i).bits.idx(bankBits-1,0) === b.U) { io.read(i).ready := arbs(b).io.in(i).ready } } io.resp(i) := Mux1H(UIntToOH(RegNext(io.read(i).bits.idx(bankBits-1,0))), arrays.map(_.io.resp)) } (arbs zip arrays).map { case (r,a) => a.io.read <> r.io.out } io.write.foreach(_.ready := false.B) for (i <- 0 until nWritePorts) { for (b <- 0 until nBanks) { write_arbs(b).io.in(i).valid := io.write(i).valid && io.write(i).bits.idx(bankBits-1,0) === b.U write_arbs(b).io.in(i).bits := io.write(i).bits write_arbs(b).io.in(i).bits.idx := io.write(i).bits.idx >> 
bankBits when (io.write(i).bits.idx(bankBits-1,0) === b.U) { io.write(i).ready := write_arbs(b).io.in(i).ready } } } (write_arbs zip arrays).map { case (r,a) => a.io.write <> r.io.out } } }
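The L1MetadataArrayBank above time-multiplexes a single SyncReadMem port between reads and writes: reads win the port, a write is only accepted when no SRAM read is needed, and a one-entry read buffer (rbuf/rbuf_idx/rbuf_valid) remembers the last set read so a repeated read of the same index can be served from the buffer while a write uses the SRAM; the stall_ctr guard keeps a stream of reads from starving a pending write forever. Below is a minimal, hypothetical Chisel sketch of that idea, not taken from the files above: the module name, port names, and the omitted anti-starvation counter are illustrative only.

import chisel3._
import chisel3.util._

// Simplified sketch of the read-buffer trick used by L1MetadataArrayBank
// (hypothetical module; assumes a single-ported SyncReadMem shared by one
// read port and one write port).
class SinglePortTagBankSketch(nSets: Int = 16, dataBits: Int = 22) extends Module {
  val io = IO(new Bundle {
    val ridx  = Flipped(Decoupled(UInt(log2Ceil(nSets).W)))
    val widx  = Flipped(Decoupled(UInt(log2Ceil(nSets).W)))
    val wdata = Input(UInt(dataBits.W))
    val rdata = Output(UInt(dataBits.W)) // valid one cycle after a read fires
  })
  val mem = SyncReadMem(nSets, UInt(dataBits.W))

  val rbufValid = RegInit(false.B)
  val rbufIdx   = Reg(UInt(log2Ceil(nSets).W))
  val rbuf      = Reg(UInt(dataBits.W))

  val hitRbuf = io.ridx.valid && rbufValid && io.ridx.bits === rbufIdx
  val doRead  = io.ridx.valid && !hitRbuf // reads win the single SRAM port
  val doWrite = io.widx.valid && !doRead  // writes use it when the read does not need it
  io.ridx.ready := true.B                 // (the real bank adds a stall counter so writes are not starved)
  io.widx.ready := !doRead

  when (doWrite) { mem.write(io.widx.bits, io.wdata) }
  val sramOut = mem.read(io.ridx.bits, doRead)

  // Capture the last SRAM read so a back-to-back read of the same set hits the
  // buffer; skip the capture if that very set is being overwritten this cycle.
  val s1_read = RegNext(doRead, false.B)
  val s1_ridx = RegEnable(io.ridx.bits, doRead)
  when (s1_read && !(doWrite && io.widx.bits === s1_ridx)) {
    rbufValid := true.B
    rbufIdx   := s1_ridx
    rbuf      := sramOut
  }
  when (doWrite && io.widx.bits === rbufIdx) { rbufValid := false.B } // keep the buffer coherent

  io.rdata := Mux(RegNext(hitRbuf, false.B), RegEnable(rbuf, hitRbuf), sramOut)
}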
module L1MetadataArrayBanked( // @[TagArray.scala:94:7] input clock, // @[TagArray.scala:94:7] input reset, // @[TagArray.scala:94:7] output io_read_0_ready, // @[TagArray.scala:96:14] input io_read_0_valid, // @[TagArray.scala:96:14] input [5:0] io_read_0_bits_idx, // @[TagArray.scala:96:14] input [19:0] io_read_0_bits_tag, // @[TagArray.scala:96:14] output io_read_1_ready, // @[TagArray.scala:96:14] input io_read_1_valid, // @[TagArray.scala:96:14] input [5:0] io_read_1_bits_idx, // @[TagArray.scala:96:14] output io_write_0_ready, // @[TagArray.scala:96:14] input io_write_0_valid, // @[TagArray.scala:96:14] input [5:0] io_write_0_bits_idx, // @[TagArray.scala:96:14] input [3:0] io_write_0_bits_way_en, // @[TagArray.scala:96:14] input [19:0] io_write_0_bits_tag, // @[TagArray.scala:96:14] input [1:0] io_write_0_bits_data_coh_state, // @[TagArray.scala:96:14] input [19:0] io_write_0_bits_data_tag, // @[TagArray.scala:96:14] output io_write_1_ready, // @[TagArray.scala:96:14] input io_write_1_valid, // @[TagArray.scala:96:14] input [5:0] io_write_1_bits_idx, // @[TagArray.scala:96:14] input [3:0] io_write_1_bits_way_en, // @[TagArray.scala:96:14] input [19:0] io_write_1_bits_tag, // @[TagArray.scala:96:14] input [1:0] io_write_1_bits_data_coh_state, // @[TagArray.scala:96:14] input [19:0] io_write_1_bits_data_tag, // @[TagArray.scala:96:14] output [1:0] io_resp_0_0_coh_state, // @[TagArray.scala:96:14] output [19:0] io_resp_0_0_tag, // @[TagArray.scala:96:14] output [1:0] io_resp_0_1_coh_state, // @[TagArray.scala:96:14] output [19:0] io_resp_0_1_tag, // @[TagArray.scala:96:14] output [1:0] io_resp_0_2_coh_state, // @[TagArray.scala:96:14] output [19:0] io_resp_0_2_tag, // @[TagArray.scala:96:14] output [1:0] io_resp_0_3_coh_state, // @[TagArray.scala:96:14] output [19:0] io_resp_0_3_tag, // @[TagArray.scala:96:14] output [1:0] io_resp_1_0_coh_state, // @[TagArray.scala:96:14] output [19:0] io_resp_1_0_tag, // @[TagArray.scala:96:14] output [1:0] io_resp_1_1_coh_state, // @[TagArray.scala:96:14] output [19:0] io_resp_1_1_tag, // @[TagArray.scala:96:14] output [1:0] io_resp_1_2_coh_state, // @[TagArray.scala:96:14] output [19:0] io_resp_1_2_tag, // @[TagArray.scala:96:14] output [1:0] io_resp_1_3_coh_state, // @[TagArray.scala:96:14] output [19:0] io_resp_1_3_tag // @[TagArray.scala:96:14] ); wire _write_arbs_3_io_in_0_ready; // @[TagArray.scala:126:47] wire _write_arbs_3_io_in_1_ready; // @[TagArray.scala:126:47] wire _write_arbs_3_io_out_valid; // @[TagArray.scala:126:47] wire [5:0] _write_arbs_3_io_out_bits_idx; // @[TagArray.scala:126:47] wire [3:0] _write_arbs_3_io_out_bits_way_en; // @[TagArray.scala:126:47] wire [19:0] _write_arbs_3_io_out_bits_tag; // @[TagArray.scala:126:47] wire [1:0] _write_arbs_3_io_out_bits_data_coh_state; // @[TagArray.scala:126:47] wire [19:0] _write_arbs_3_io_out_bits_data_tag; // @[TagArray.scala:126:47] wire _write_arbs_2_io_in_0_ready; // @[TagArray.scala:126:47] wire _write_arbs_2_io_in_1_ready; // @[TagArray.scala:126:47] wire _write_arbs_2_io_out_valid; // @[TagArray.scala:126:47] wire [5:0] _write_arbs_2_io_out_bits_idx; // @[TagArray.scala:126:47] wire [3:0] _write_arbs_2_io_out_bits_way_en; // @[TagArray.scala:126:47] wire [19:0] _write_arbs_2_io_out_bits_tag; // @[TagArray.scala:126:47] wire [1:0] _write_arbs_2_io_out_bits_data_coh_state; // @[TagArray.scala:126:47] wire [19:0] _write_arbs_2_io_out_bits_data_tag; // @[TagArray.scala:126:47] wire _write_arbs_1_io_in_0_ready; // @[TagArray.scala:126:47] wire _write_arbs_1_io_in_1_ready; // 
@[TagArray.scala:126:47] wire _write_arbs_1_io_out_valid; // @[TagArray.scala:126:47] wire [5:0] _write_arbs_1_io_out_bits_idx; // @[TagArray.scala:126:47] wire [3:0] _write_arbs_1_io_out_bits_way_en; // @[TagArray.scala:126:47] wire [19:0] _write_arbs_1_io_out_bits_tag; // @[TagArray.scala:126:47] wire [1:0] _write_arbs_1_io_out_bits_data_coh_state; // @[TagArray.scala:126:47] wire [19:0] _write_arbs_1_io_out_bits_data_tag; // @[TagArray.scala:126:47] wire _write_arbs_0_io_in_0_ready; // @[TagArray.scala:126:47] wire _write_arbs_0_io_in_1_ready; // @[TagArray.scala:126:47] wire _write_arbs_0_io_out_valid; // @[TagArray.scala:126:47] wire [5:0] _write_arbs_0_io_out_bits_idx; // @[TagArray.scala:126:47] wire [3:0] _write_arbs_0_io_out_bits_way_en; // @[TagArray.scala:126:47] wire [19:0] _write_arbs_0_io_out_bits_tag; // @[TagArray.scala:126:47] wire [1:0] _write_arbs_0_io_out_bits_data_coh_state; // @[TagArray.scala:126:47] wire [19:0] _write_arbs_0_io_out_bits_data_tag; // @[TagArray.scala:126:47] wire _arbs_3_io_in_0_ready; // @[TagArray.scala:125:41] wire _arbs_3_io_in_1_ready; // @[TagArray.scala:125:41] wire _arbs_3_io_out_valid; // @[TagArray.scala:125:41] wire [5:0] _arbs_3_io_out_bits_idx; // @[TagArray.scala:125:41] wire [3:0] _arbs_3_io_out_bits_way_en; // @[TagArray.scala:125:41] wire [19:0] _arbs_3_io_out_bits_tag; // @[TagArray.scala:125:41] wire _arbs_2_io_in_0_ready; // @[TagArray.scala:125:41] wire _arbs_2_io_in_1_ready; // @[TagArray.scala:125:41] wire _arbs_2_io_out_valid; // @[TagArray.scala:125:41] wire [5:0] _arbs_2_io_out_bits_idx; // @[TagArray.scala:125:41] wire [3:0] _arbs_2_io_out_bits_way_en; // @[TagArray.scala:125:41] wire [19:0] _arbs_2_io_out_bits_tag; // @[TagArray.scala:125:41] wire _arbs_1_io_in_0_ready; // @[TagArray.scala:125:41] wire _arbs_1_io_in_1_ready; // @[TagArray.scala:125:41] wire _arbs_1_io_out_valid; // @[TagArray.scala:125:41] wire [5:0] _arbs_1_io_out_bits_idx; // @[TagArray.scala:125:41] wire [3:0] _arbs_1_io_out_bits_way_en; // @[TagArray.scala:125:41] wire [19:0] _arbs_1_io_out_bits_tag; // @[TagArray.scala:125:41] wire _arbs_0_io_in_0_ready; // @[TagArray.scala:125:41] wire _arbs_0_io_in_1_ready; // @[TagArray.scala:125:41] wire _arbs_0_io_out_valid; // @[TagArray.scala:125:41] wire [5:0] _arbs_0_io_out_bits_idx; // @[TagArray.scala:125:41] wire [3:0] _arbs_0_io_out_bits_way_en; // @[TagArray.scala:125:41] wire [19:0] _arbs_0_io_out_bits_tag; // @[TagArray.scala:125:41] wire _arrays_3_io_read_ready; // @[TagArray.scala:124:43] wire _arrays_3_io_write_ready; // @[TagArray.scala:124:43] wire [1:0] _arrays_3_io_resp_0_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_3_io_resp_0_tag; // @[TagArray.scala:124:43] wire [1:0] _arrays_3_io_resp_1_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_3_io_resp_1_tag; // @[TagArray.scala:124:43] wire [1:0] _arrays_3_io_resp_2_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_3_io_resp_2_tag; // @[TagArray.scala:124:43] wire [1:0] _arrays_3_io_resp_3_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_3_io_resp_3_tag; // @[TagArray.scala:124:43] wire _arrays_2_io_read_ready; // @[TagArray.scala:124:43] wire _arrays_2_io_write_ready; // @[TagArray.scala:124:43] wire [1:0] _arrays_2_io_resp_0_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_2_io_resp_0_tag; // @[TagArray.scala:124:43] wire [1:0] _arrays_2_io_resp_1_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_2_io_resp_1_tag; // @[TagArray.scala:124:43] wire [1:0] _arrays_2_io_resp_2_coh_state; // 
@[TagArray.scala:124:43] wire [19:0] _arrays_2_io_resp_2_tag; // @[TagArray.scala:124:43] wire [1:0] _arrays_2_io_resp_3_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_2_io_resp_3_tag; // @[TagArray.scala:124:43] wire _arrays_1_io_read_ready; // @[TagArray.scala:124:43] wire _arrays_1_io_write_ready; // @[TagArray.scala:124:43] wire [1:0] _arrays_1_io_resp_0_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_1_io_resp_0_tag; // @[TagArray.scala:124:43] wire [1:0] _arrays_1_io_resp_1_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_1_io_resp_1_tag; // @[TagArray.scala:124:43] wire [1:0] _arrays_1_io_resp_2_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_1_io_resp_2_tag; // @[TagArray.scala:124:43] wire [1:0] _arrays_1_io_resp_3_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_1_io_resp_3_tag; // @[TagArray.scala:124:43] wire _arrays_0_io_read_ready; // @[TagArray.scala:124:43] wire _arrays_0_io_write_ready; // @[TagArray.scala:124:43] wire [1:0] _arrays_0_io_resp_0_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_0_io_resp_0_tag; // @[TagArray.scala:124:43] wire [1:0] _arrays_0_io_resp_1_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_0_io_resp_1_tag; // @[TagArray.scala:124:43] wire [1:0] _arrays_0_io_resp_2_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_0_io_resp_2_tag; // @[TagArray.scala:124:43] wire [1:0] _arrays_0_io_resp_3_coh_state; // @[TagArray.scala:124:43] wire [19:0] _arrays_0_io_resp_3_tag; // @[TagArray.scala:124:43] wire io_read_0_valid_0 = io_read_0_valid; // @[TagArray.scala:94:7] wire [5:0] io_read_0_bits_idx_0 = io_read_0_bits_idx; // @[TagArray.scala:94:7] wire [19:0] io_read_0_bits_tag_0 = io_read_0_bits_tag; // @[TagArray.scala:94:7] wire io_read_1_valid_0 = io_read_1_valid; // @[TagArray.scala:94:7] wire [5:0] io_read_1_bits_idx_0 = io_read_1_bits_idx; // @[TagArray.scala:94:7] wire io_write_0_valid_0 = io_write_0_valid; // @[TagArray.scala:94:7] wire [5:0] io_write_0_bits_idx_0 = io_write_0_bits_idx; // @[TagArray.scala:94:7] wire [3:0] io_write_0_bits_way_en_0 = io_write_0_bits_way_en; // @[TagArray.scala:94:7] wire [19:0] io_write_0_bits_tag_0 = io_write_0_bits_tag; // @[TagArray.scala:94:7] wire [1:0] io_write_0_bits_data_coh_state_0 = io_write_0_bits_data_coh_state; // @[TagArray.scala:94:7] wire [19:0] io_write_0_bits_data_tag_0 = io_write_0_bits_data_tag; // @[TagArray.scala:94:7] wire io_write_1_valid_0 = io_write_1_valid; // @[TagArray.scala:94:7] wire [5:0] io_write_1_bits_idx_0 = io_write_1_bits_idx; // @[TagArray.scala:94:7] wire [3:0] io_write_1_bits_way_en_0 = io_write_1_bits_way_en; // @[TagArray.scala:94:7] wire [19:0] io_write_1_bits_tag_0 = io_write_1_bits_tag; // @[TagArray.scala:94:7] wire [1:0] io_write_1_bits_data_coh_state_0 = io_write_1_bits_data_coh_state; // @[TagArray.scala:94:7] wire [19:0] io_write_1_bits_data_tag_0 = io_write_1_bits_data_tag; // @[TagArray.scala:94:7] wire [1:0] rstVal_meta_state = 2'h0; // @[Metadata.scala:160:20] wire [1:0] rstVal_coh_state = 2'h0; // @[HellaCache.scala:305:20] wire [19:0] io_read_1_bits_tag = 20'h0; // @[TagArray.scala:94:7] wire [19:0] rstVal_tag = 20'h0; // @[HellaCache.scala:305:20] wire [3:0] io_read_1_bits_way_en = 4'h0; // @[TagArray.scala:94:7, :96:14, :125:41] wire [3:0] io_read_0_bits_way_en = 4'hF; // @[TagArray.scala:94:7, :96:14, :125:41] wire io_read_0_ready_0; // @[TagArray.scala:94:7] wire io_read_1_ready_0; // @[TagArray.scala:94:7] wire io_write_0_ready_0; // @[TagArray.scala:94:7] wire 
io_write_1_ready_0; // @[TagArray.scala:94:7] wire [1:0] io_resp_0_0_coh_state_0; // @[TagArray.scala:94:7] wire [19:0] io_resp_0_0_tag_0; // @[TagArray.scala:94:7] wire [1:0] io_resp_0_1_coh_state_0; // @[TagArray.scala:94:7] wire [19:0] io_resp_0_1_tag_0; // @[TagArray.scala:94:7] wire [1:0] io_resp_0_2_coh_state_0; // @[TagArray.scala:94:7] wire [19:0] io_resp_0_2_tag_0; // @[TagArray.scala:94:7] wire [1:0] io_resp_0_3_coh_state_0; // @[TagArray.scala:94:7] wire [19:0] io_resp_0_3_tag_0; // @[TagArray.scala:94:7] wire [1:0] io_resp_1_0_coh_state_0; // @[TagArray.scala:94:7] wire [19:0] io_resp_1_0_tag_0; // @[TagArray.scala:94:7] wire [1:0] io_resp_1_1_coh_state_0; // @[TagArray.scala:94:7] wire [19:0] io_resp_1_1_tag_0; // @[TagArray.scala:94:7] wire [1:0] io_resp_1_2_coh_state_0; // @[TagArray.scala:94:7] wire [19:0] io_resp_1_2_tag_0; // @[TagArray.scala:94:7] wire [1:0] io_resp_1_3_coh_state_0; // @[TagArray.scala:94:7] wire [19:0] io_resp_1_3_tag_0; // @[TagArray.scala:94:7] wire [1:0] _arbs_0_io_in_0_valid_T = io_read_0_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :131:74] wire [1:0] _arbs_1_io_in_0_valid_T = io_read_0_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :131:74] wire [1:0] _arbs_2_io_in_0_valid_T = io_read_0_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :131:74] wire [1:0] _arbs_3_io_in_0_valid_T = io_read_0_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :131:74] wire _arbs_0_io_in_0_valid_T_1 = _arbs_0_io_in_0_valid_T == 2'h0; // @[TagArray.scala:131:{74,89}] wire _arbs_0_io_in_0_valid_T_2 = io_read_0_valid_0 & _arbs_0_io_in_0_valid_T_1; // @[TagArray.scala:94:7, :131:{52,89}] wire [3:0] _arbs_0_io_in_0_bits_idx_T = io_read_0_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :133:58] wire [3:0] _arbs_1_io_in_0_bits_idx_T = io_read_0_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :133:58] wire [3:0] _arbs_2_io_in_0_bits_idx_T = io_read_0_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :133:58] wire [3:0] _arbs_3_io_in_0_bits_idx_T = io_read_0_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :133:58] wire _arbs_1_io_in_0_valid_T_1 = _arbs_1_io_in_0_valid_T == 2'h1; // @[TagArray.scala:131:{74,89}] wire _arbs_1_io_in_0_valid_T_2 = io_read_0_valid_0 & _arbs_1_io_in_0_valid_T_1; // @[TagArray.scala:94:7, :131:{52,89}] wire _arbs_2_io_in_0_valid_T_1 = _arbs_2_io_in_0_valid_T == 2'h2; // @[TagArray.scala:131:{74,89}] wire _arbs_2_io_in_0_valid_T_2 = io_read_0_valid_0 & _arbs_2_io_in_0_valid_T_1; // @[TagArray.scala:94:7, :131:{52,89}] wire _arbs_3_io_in_0_valid_T_1 = &_arbs_3_io_in_0_valid_T; // @[TagArray.scala:131:{74,89}] wire _arbs_3_io_in_0_valid_T_2 = io_read_0_valid_0 & _arbs_3_io_in_0_valid_T_1; // @[TagArray.scala:94:7, :131:{52,89}] wire [3:0] _GEN = {{_arbs_3_io_in_0_ready}, {_arbs_2_io_in_0_ready}, {_arbs_1_io_in_0_ready}, {io_read_0_bits_idx_0[1:0] == 2'h0 & _arbs_0_io_in_0_ready}}; // @[TagArray.scala:94:7, :125:41, :127:29, :131:74, :134:{49,58,77}] assign io_read_0_ready_0 = _GEN[io_read_0_bits_idx_0[1:0]]; // @[TagArray.scala:94:7, :131:74, :134:{49,58,77}] reg [1:0] REG; // @[TagArray.scala:136:43] wire _T_10 = REG == 2'h0; // @[Mux.scala:32:36] wire _T_11 = REG == 2'h1; // @[Mux.scala:32:36] wire _T_12 = REG == 2'h2; // @[Mux.scala:32:36] assign io_resp_0_0_tag_0 = (_T_10 ? _arrays_0_io_resp_0_tag : 20'h0) | (_T_11 ? _arrays_1_io_resp_0_tag : 20'h0) | (_T_12 ? _arrays_2_io_resp_0_tag : 20'h0) | ((&REG) ? _arrays_3_io_resp_0_tag : 20'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_0_0_coh_state_0 = (_T_10 ? _arrays_0_io_resp_0_coh_state : 2'h0) | (_T_11 ? 
_arrays_1_io_resp_0_coh_state : 2'h0) | (_T_12 ? _arrays_2_io_resp_0_coh_state : 2'h0) | ((&REG) ? _arrays_3_io_resp_0_coh_state : 2'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_0_1_tag_0 = (_T_10 ? _arrays_0_io_resp_1_tag : 20'h0) | (_T_11 ? _arrays_1_io_resp_1_tag : 20'h0) | (_T_12 ? _arrays_2_io_resp_1_tag : 20'h0) | ((&REG) ? _arrays_3_io_resp_1_tag : 20'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_0_1_coh_state_0 = (_T_10 ? _arrays_0_io_resp_1_coh_state : 2'h0) | (_T_11 ? _arrays_1_io_resp_1_coh_state : 2'h0) | (_T_12 ? _arrays_2_io_resp_1_coh_state : 2'h0) | ((&REG) ? _arrays_3_io_resp_1_coh_state : 2'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_0_2_tag_0 = (_T_10 ? _arrays_0_io_resp_2_tag : 20'h0) | (_T_11 ? _arrays_1_io_resp_2_tag : 20'h0) | (_T_12 ? _arrays_2_io_resp_2_tag : 20'h0) | ((&REG) ? _arrays_3_io_resp_2_tag : 20'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_0_2_coh_state_0 = (_T_10 ? _arrays_0_io_resp_2_coh_state : 2'h0) | (_T_11 ? _arrays_1_io_resp_2_coh_state : 2'h0) | (_T_12 ? _arrays_2_io_resp_2_coh_state : 2'h0) | ((&REG) ? _arrays_3_io_resp_2_coh_state : 2'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_0_3_tag_0 = (_T_10 ? _arrays_0_io_resp_3_tag : 20'h0) | (_T_11 ? _arrays_1_io_resp_3_tag : 20'h0) | (_T_12 ? _arrays_2_io_resp_3_tag : 20'h0) | ((&REG) ? _arrays_3_io_resp_3_tag : 20'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_0_3_coh_state_0 = (_T_10 ? _arrays_0_io_resp_3_coh_state : 2'h0) | (_T_11 ? _arrays_1_io_resp_3_coh_state : 2'h0) | (_T_12 ? _arrays_2_io_resp_3_coh_state : 2'h0) | ((&REG) ? _arrays_3_io_resp_3_coh_state : 2'h0); // @[Mux.scala:30:73, :32:36] wire [1:0] _arbs_0_io_in_1_valid_T = io_read_1_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :131:74] wire [1:0] _arbs_1_io_in_1_valid_T = io_read_1_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :131:74] wire [1:0] _arbs_2_io_in_1_valid_T = io_read_1_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :131:74] wire [1:0] _arbs_3_io_in_1_valid_T = io_read_1_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :131:74] wire _arbs_0_io_in_1_valid_T_1 = _arbs_0_io_in_1_valid_T == 2'h0; // @[TagArray.scala:131:{74,89}] wire _arbs_0_io_in_1_valid_T_2 = io_read_1_valid_0 & _arbs_0_io_in_1_valid_T_1; // @[TagArray.scala:94:7, :131:{52,89}] wire [3:0] _arbs_0_io_in_1_bits_idx_T = io_read_1_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :133:58] wire [3:0] _arbs_1_io_in_1_bits_idx_T = io_read_1_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :133:58] wire [3:0] _arbs_2_io_in_1_bits_idx_T = io_read_1_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :133:58] wire [3:0] _arbs_3_io_in_1_bits_idx_T = io_read_1_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :133:58] wire _arbs_1_io_in_1_valid_T_1 = _arbs_1_io_in_1_valid_T == 2'h1; // @[TagArray.scala:131:{74,89}] wire _arbs_1_io_in_1_valid_T_2 = io_read_1_valid_0 & _arbs_1_io_in_1_valid_T_1; // @[TagArray.scala:94:7, :131:{52,89}] wire _arbs_2_io_in_1_valid_T_1 = _arbs_2_io_in_1_valid_T == 2'h2; // @[TagArray.scala:131:{74,89}] wire _arbs_2_io_in_1_valid_T_2 = io_read_1_valid_0 & _arbs_2_io_in_1_valid_T_1; // @[TagArray.scala:94:7, :131:{52,89}] wire _arbs_3_io_in_1_valid_T_1 = &_arbs_3_io_in_1_valid_T; // @[TagArray.scala:131:{74,89}] wire _arbs_3_io_in_1_valid_T_2 = io_read_1_valid_0 & _arbs_3_io_in_1_valid_T_1; // @[TagArray.scala:94:7, :131:{52,89}] wire [3:0] _GEN_0 = {{_arbs_3_io_in_1_ready}, {_arbs_2_io_in_1_ready}, {_arbs_1_io_in_1_ready}, {io_read_1_bits_idx_0[1:0] == 2'h0 & _arbs_0_io_in_1_ready}}; // @[TagArray.scala:94:7, :125:41, :127:29, :131:74, :134:{49,58,77}] assign 
io_read_1_ready_0 = _GEN_0[io_read_1_bits_idx_0[1:0]]; // @[TagArray.scala:94:7, :131:74, :134:{49,58,77}] reg [1:0] REG_1; // @[TagArray.scala:136:43] wire _T_80 = REG_1 == 2'h0; // @[Mux.scala:32:36] wire _T_81 = REG_1 == 2'h1; // @[Mux.scala:32:36] wire _T_82 = REG_1 == 2'h2; // @[Mux.scala:32:36] assign io_resp_1_0_tag_0 = (_T_80 ? _arrays_0_io_resp_0_tag : 20'h0) | (_T_81 ? _arrays_1_io_resp_0_tag : 20'h0) | (_T_82 ? _arrays_2_io_resp_0_tag : 20'h0) | ((&REG_1) ? _arrays_3_io_resp_0_tag : 20'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_1_0_coh_state_0 = (_T_80 ? _arrays_0_io_resp_0_coh_state : 2'h0) | (_T_81 ? _arrays_1_io_resp_0_coh_state : 2'h0) | (_T_82 ? _arrays_2_io_resp_0_coh_state : 2'h0) | ((&REG_1) ? _arrays_3_io_resp_0_coh_state : 2'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_1_1_tag_0 = (_T_80 ? _arrays_0_io_resp_1_tag : 20'h0) | (_T_81 ? _arrays_1_io_resp_1_tag : 20'h0) | (_T_82 ? _arrays_2_io_resp_1_tag : 20'h0) | ((&REG_1) ? _arrays_3_io_resp_1_tag : 20'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_1_1_coh_state_0 = (_T_80 ? _arrays_0_io_resp_1_coh_state : 2'h0) | (_T_81 ? _arrays_1_io_resp_1_coh_state : 2'h0) | (_T_82 ? _arrays_2_io_resp_1_coh_state : 2'h0) | ((&REG_1) ? _arrays_3_io_resp_1_coh_state : 2'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_1_2_tag_0 = (_T_80 ? _arrays_0_io_resp_2_tag : 20'h0) | (_T_81 ? _arrays_1_io_resp_2_tag : 20'h0) | (_T_82 ? _arrays_2_io_resp_2_tag : 20'h0) | ((&REG_1) ? _arrays_3_io_resp_2_tag : 20'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_1_2_coh_state_0 = (_T_80 ? _arrays_0_io_resp_2_coh_state : 2'h0) | (_T_81 ? _arrays_1_io_resp_2_coh_state : 2'h0) | (_T_82 ? _arrays_2_io_resp_2_coh_state : 2'h0) | ((&REG_1) ? _arrays_3_io_resp_2_coh_state : 2'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_1_3_tag_0 = (_T_80 ? _arrays_0_io_resp_3_tag : 20'h0) | (_T_81 ? _arrays_1_io_resp_3_tag : 20'h0) | (_T_82 ? _arrays_2_io_resp_3_tag : 20'h0) | ((&REG_1) ? _arrays_3_io_resp_3_tag : 20'h0); // @[Mux.scala:30:73, :32:36] assign io_resp_1_3_coh_state_0 = (_T_80 ? _arrays_0_io_resp_3_coh_state : 2'h0) | (_T_81 ? _arrays_1_io_resp_3_coh_state : 2'h0) | (_T_82 ? _arrays_2_io_resp_3_coh_state : 2'h0) | ((&REG_1) ? 
_arrays_3_io_resp_3_coh_state : 2'h0); // @[Mux.scala:30:73, :32:36] wire [1:0] _write_arbs_0_io_in_0_valid_T = io_write_0_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :143:82] wire [1:0] _write_arbs_1_io_in_0_valid_T = io_write_0_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :143:82] wire [1:0] _write_arbs_2_io_in_0_valid_T = io_write_0_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :143:82] wire [1:0] _write_arbs_3_io_in_0_valid_T = io_write_0_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :143:82] wire _write_arbs_0_io_in_0_valid_T_1 = _write_arbs_0_io_in_0_valid_T == 2'h0; // @[TagArray.scala:143:{82,97}] wire _write_arbs_0_io_in_0_valid_T_2 = io_write_0_valid_0 & _write_arbs_0_io_in_0_valid_T_1; // @[TagArray.scala:94:7, :143:{59,97}] wire [3:0] _write_arbs_0_io_in_0_bits_idx_T = io_write_0_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :145:65] wire [3:0] _write_arbs_1_io_in_0_bits_idx_T = io_write_0_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :145:65] wire [3:0] _write_arbs_2_io_in_0_bits_idx_T = io_write_0_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :145:65] wire [3:0] _write_arbs_3_io_in_0_bits_idx_T = io_write_0_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :145:65] wire _write_arbs_1_io_in_0_valid_T_1 = _write_arbs_1_io_in_0_valid_T == 2'h1; // @[TagArray.scala:143:{82,97}] wire _write_arbs_1_io_in_0_valid_T_2 = io_write_0_valid_0 & _write_arbs_1_io_in_0_valid_T_1; // @[TagArray.scala:94:7, :143:{59,97}] wire _write_arbs_2_io_in_0_valid_T_1 = _write_arbs_2_io_in_0_valid_T == 2'h2; // @[TagArray.scala:143:{82,97}] wire _write_arbs_2_io_in_0_valid_T_2 = io_write_0_valid_0 & _write_arbs_2_io_in_0_valid_T_1; // @[TagArray.scala:94:7, :143:{59,97}] wire _write_arbs_3_io_in_0_valid_T_1 = &_write_arbs_3_io_in_0_valid_T; // @[TagArray.scala:143:{82,97}] wire _write_arbs_3_io_in_0_valid_T_2 = io_write_0_valid_0 & _write_arbs_3_io_in_0_valid_T_1; // @[TagArray.scala:94:7, :143:{59,97}] wire [3:0] _GEN_1 = {{_write_arbs_3_io_in_0_ready}, {_write_arbs_2_io_in_0_ready}, {_write_arbs_1_io_in_0_ready}, {io_write_0_bits_idx_0[1:0] == 2'h0 & _write_arbs_0_io_in_0_ready}}; // @[TagArray.scala:94:7, :126:47, :140:30, :143:82, :146:{50,59,79}] assign io_write_0_ready_0 = _GEN_1[io_write_0_bits_idx_0[1:0]]; // @[TagArray.scala:94:7, :143:82, :146:{50,59,79}] wire [1:0] _write_arbs_0_io_in_1_valid_T = io_write_1_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :143:82] wire [1:0] _write_arbs_1_io_in_1_valid_T = io_write_1_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :143:82] wire [1:0] _write_arbs_2_io_in_1_valid_T = io_write_1_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :143:82] wire [1:0] _write_arbs_3_io_in_1_valid_T = io_write_1_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :143:82] wire _write_arbs_0_io_in_1_valid_T_1 = _write_arbs_0_io_in_1_valid_T == 2'h0; // @[TagArray.scala:143:{82,97}] wire _write_arbs_0_io_in_1_valid_T_2 = io_write_1_valid_0 & _write_arbs_0_io_in_1_valid_T_1; // @[TagArray.scala:94:7, :143:{59,97}] wire [3:0] _write_arbs_0_io_in_1_bits_idx_T = io_write_1_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :145:65] wire [3:0] _write_arbs_1_io_in_1_bits_idx_T = io_write_1_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :145:65] wire [3:0] _write_arbs_2_io_in_1_bits_idx_T = io_write_1_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :145:65] wire [3:0] _write_arbs_3_io_in_1_bits_idx_T = io_write_1_bits_idx_0[5:2]; // @[TagArray.scala:94:7, :145:65] wire _write_arbs_1_io_in_1_valid_T_1 = _write_arbs_1_io_in_1_valid_T == 2'h1; // @[TagArray.scala:143:{82,97}] wire _write_arbs_1_io_in_1_valid_T_2 = io_write_1_valid_0 & 
_write_arbs_1_io_in_1_valid_T_1; // @[TagArray.scala:94:7, :143:{59,97}] wire _write_arbs_2_io_in_1_valid_T_1 = _write_arbs_2_io_in_1_valid_T == 2'h2; // @[TagArray.scala:143:{82,97}] wire _write_arbs_2_io_in_1_valid_T_2 = io_write_1_valid_0 & _write_arbs_2_io_in_1_valid_T_1; // @[TagArray.scala:94:7, :143:{59,97}] wire _write_arbs_3_io_in_1_valid_T_1 = &_write_arbs_3_io_in_1_valid_T; // @[TagArray.scala:143:{82,97}] wire _write_arbs_3_io_in_1_valid_T_2 = io_write_1_valid_0 & _write_arbs_3_io_in_1_valid_T_1; // @[TagArray.scala:94:7, :143:{59,97}] wire [3:0] _GEN_2 = {{_write_arbs_3_io_in_1_ready}, {_write_arbs_2_io_in_1_ready}, {_write_arbs_1_io_in_1_ready}, {io_write_1_bits_idx_0[1:0] == 2'h0 & _write_arbs_0_io_in_1_ready}}; // @[TagArray.scala:94:7, :126:47, :140:30, :143:82, :146:{50,59,79}] assign io_write_1_ready_0 = _GEN_2[io_write_1_bits_idx_0[1:0]]; // @[TagArray.scala:94:7, :143:82, :146:{50,59,79}] always @(posedge clock) begin // @[TagArray.scala:94:7] REG <= io_read_0_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :131:74, :136:43] REG_1 <= io_read_1_bits_idx_0[1:0]; // @[TagArray.scala:94:7, :131:74, :136:43] end // always @(posedge) L1MetadataArrayBank arrays_0 ( // @[TagArray.scala:124:43] .clock (clock), .reset (reset), .io_read_ready (_arrays_0_io_read_ready), .io_read_valid (_arbs_0_io_out_valid), // @[TagArray.scala:125:41] .io_read_bits_idx (_arbs_0_io_out_bits_idx), // @[TagArray.scala:125:41] .io_read_bits_way_en (_arbs_0_io_out_bits_way_en), // @[TagArray.scala:125:41] .io_read_bits_tag (_arbs_0_io_out_bits_tag), // @[TagArray.scala:125:41] .io_write_ready (_arrays_0_io_write_ready), .io_write_valid (_write_arbs_0_io_out_valid), // @[TagArray.scala:126:47] .io_write_bits_idx (_write_arbs_0_io_out_bits_idx), // @[TagArray.scala:126:47] .io_write_bits_way_en (_write_arbs_0_io_out_bits_way_en), // @[TagArray.scala:126:47] .io_write_bits_tag (_write_arbs_0_io_out_bits_tag), // @[TagArray.scala:126:47] .io_write_bits_data_coh_state (_write_arbs_0_io_out_bits_data_coh_state), // @[TagArray.scala:126:47] .io_write_bits_data_tag (_write_arbs_0_io_out_bits_data_tag), // @[TagArray.scala:126:47] .io_resp_0_coh_state (_arrays_0_io_resp_0_coh_state), .io_resp_0_tag (_arrays_0_io_resp_0_tag), .io_resp_1_coh_state (_arrays_0_io_resp_1_coh_state), .io_resp_1_tag (_arrays_0_io_resp_1_tag), .io_resp_2_coh_state (_arrays_0_io_resp_2_coh_state), .io_resp_2_tag (_arrays_0_io_resp_2_tag), .io_resp_3_coh_state (_arrays_0_io_resp_3_coh_state), .io_resp_3_tag (_arrays_0_io_resp_3_tag) ); // @[TagArray.scala:124:43] L1MetadataArrayBank_1 arrays_1 ( // @[TagArray.scala:124:43] .clock (clock), .reset (reset), .io_read_ready (_arrays_1_io_read_ready), .io_read_valid (_arbs_1_io_out_valid), // @[TagArray.scala:125:41] .io_read_bits_idx (_arbs_1_io_out_bits_idx), // @[TagArray.scala:125:41] .io_read_bits_way_en (_arbs_1_io_out_bits_way_en), // @[TagArray.scala:125:41] .io_read_bits_tag (_arbs_1_io_out_bits_tag), // @[TagArray.scala:125:41] .io_write_ready (_arrays_1_io_write_ready), .io_write_valid (_write_arbs_1_io_out_valid), // @[TagArray.scala:126:47] .io_write_bits_idx (_write_arbs_1_io_out_bits_idx), // @[TagArray.scala:126:47] .io_write_bits_way_en (_write_arbs_1_io_out_bits_way_en), // @[TagArray.scala:126:47] .io_write_bits_tag (_write_arbs_1_io_out_bits_tag), // @[TagArray.scala:126:47] .io_write_bits_data_coh_state (_write_arbs_1_io_out_bits_data_coh_state), // @[TagArray.scala:126:47] .io_write_bits_data_tag (_write_arbs_1_io_out_bits_data_tag), // @[TagArray.scala:126:47] .io_resp_0_coh_state
(_arrays_1_io_resp_0_coh_state), .io_resp_0_tag (_arrays_1_io_resp_0_tag), .io_resp_1_coh_state (_arrays_1_io_resp_1_coh_state), .io_resp_1_tag (_arrays_1_io_resp_1_tag), .io_resp_2_coh_state (_arrays_1_io_resp_2_coh_state), .io_resp_2_tag (_arrays_1_io_resp_2_tag), .io_resp_3_coh_state (_arrays_1_io_resp_3_coh_state), .io_resp_3_tag (_arrays_1_io_resp_3_tag) ); // @[TagArray.scala:124:43] L1MetadataArrayBank_2 arrays_2 ( // @[TagArray.scala:124:43] .clock (clock), .reset (reset), .io_read_ready (_arrays_2_io_read_ready), .io_read_valid (_arbs_2_io_out_valid), // @[TagArray.scala:125:41] .io_read_bits_idx (_arbs_2_io_out_bits_idx), // @[TagArray.scala:125:41] .io_read_bits_way_en (_arbs_2_io_out_bits_way_en), // @[TagArray.scala:125:41] .io_read_bits_tag (_arbs_2_io_out_bits_tag), // @[TagArray.scala:125:41] .io_write_ready (_arrays_2_io_write_ready), .io_write_valid (_write_arbs_2_io_out_valid), // @[TagArray.scala:126:47] .io_write_bits_idx (_write_arbs_2_io_out_bits_idx), // @[TagArray.scala:126:47] .io_write_bits_way_en (_write_arbs_2_io_out_bits_way_en), // @[TagArray.scala:126:47] .io_write_bits_tag (_write_arbs_2_io_out_bits_tag), // @[TagArray.scala:126:47] .io_write_bits_data_coh_state (_write_arbs_2_io_out_bits_data_coh_state), // @[TagArray.scala:126:47] .io_write_bits_data_tag (_write_arbs_2_io_out_bits_data_tag), // @[TagArray.scala:126:47] .io_resp_0_coh_state (_arrays_2_io_resp_0_coh_state), .io_resp_0_tag (_arrays_2_io_resp_0_tag), .io_resp_1_coh_state (_arrays_2_io_resp_1_coh_state), .io_resp_1_tag (_arrays_2_io_resp_1_tag), .io_resp_2_coh_state (_arrays_2_io_resp_2_coh_state), .io_resp_2_tag (_arrays_2_io_resp_2_tag), .io_resp_3_coh_state (_arrays_2_io_resp_3_coh_state), .io_resp_3_tag (_arrays_2_io_resp_3_tag) ); // @[TagArray.scala:124:43] L1MetadataArrayBank_3 arrays_3 ( // @[TagArray.scala:124:43] .clock (clock), .reset (reset), .io_read_ready (_arrays_3_io_read_ready), .io_read_valid (_arbs_3_io_out_valid), // @[TagArray.scala:125:41] .io_read_bits_idx (_arbs_3_io_out_bits_idx), // @[TagArray.scala:125:41] .io_read_bits_way_en (_arbs_3_io_out_bits_way_en), // @[TagArray.scala:125:41] .io_read_bits_tag (_arbs_3_io_out_bits_tag), // @[TagArray.scala:125:41] .io_write_ready (_arrays_3_io_write_ready), .io_write_valid (_write_arbs_3_io_out_valid), // @[TagArray.scala:126:47] .io_write_bits_idx (_write_arbs_3_io_out_bits_idx), // @[TagArray.scala:126:47] .io_write_bits_way_en (_write_arbs_3_io_out_bits_way_en), // @[TagArray.scala:126:47] .io_write_bits_tag (_write_arbs_3_io_out_bits_tag), // @[TagArray.scala:126:47] .io_write_bits_data_coh_state (_write_arbs_3_io_out_bits_data_coh_state), // @[TagArray.scala:126:47] .io_write_bits_data_tag (_write_arbs_3_io_out_bits_data_tag), // @[TagArray.scala:126:47] .io_resp_0_coh_state (_arrays_3_io_resp_0_coh_state), .io_resp_0_tag (_arrays_3_io_resp_0_tag), .io_resp_1_coh_state (_arrays_3_io_resp_1_coh_state), .io_resp_1_tag (_arrays_3_io_resp_1_tag), .io_resp_2_coh_state (_arrays_3_io_resp_2_coh_state), .io_resp_2_tag (_arrays_3_io_resp_2_tag), .io_resp_3_coh_state (_arrays_3_io_resp_3_coh_state), .io_resp_3_tag (_arrays_3_io_resp_3_tag) ); // @[TagArray.scala:124:43] Arbiter2_L1MetaReadReq arbs_0 ( // @[TagArray.scala:125:41] .clock (clock), .reset (reset), .io_in_0_ready (_arbs_0_io_in_0_ready), .io_in_0_valid (_arbs_0_io_in_0_valid_T_2), // @[TagArray.scala:131:52] .io_in_0_bits_idx ({2'h0, _arbs_0_io_in_0_bits_idx_T}), // @[TagArray.scala:133:{35,58}] .io_in_0_bits_tag (io_read_0_bits_tag_0), // @[TagArray.scala:94:7] 
.io_in_1_ready (_arbs_0_io_in_1_ready), .io_in_1_valid (_arbs_0_io_in_1_valid_T_2), // @[TagArray.scala:131:52] .io_in_1_bits_idx ({2'h0, _arbs_0_io_in_1_bits_idx_T}), // @[TagArray.scala:133:{35,58}] .io_out_ready (_arrays_0_io_read_ready), // @[TagArray.scala:124:43] .io_out_valid (_arbs_0_io_out_valid), .io_out_bits_idx (_arbs_0_io_out_bits_idx), .io_out_bits_way_en (_arbs_0_io_out_bits_way_en), .io_out_bits_tag (_arbs_0_io_out_bits_tag) ); // @[TagArray.scala:125:41] Arbiter2_L1MetaReadReq_1 arbs_1 ( // @[TagArray.scala:125:41] .clock (clock), .reset (reset), .io_in_0_ready (_arbs_1_io_in_0_ready), .io_in_0_valid (_arbs_1_io_in_0_valid_T_2), // @[TagArray.scala:131:52] .io_in_0_bits_idx ({2'h0, _arbs_1_io_in_0_bits_idx_T}), // @[TagArray.scala:133:{35,58}] .io_in_0_bits_tag (io_read_0_bits_tag_0), // @[TagArray.scala:94:7] .io_in_1_ready (_arbs_1_io_in_1_ready), .io_in_1_valid (_arbs_1_io_in_1_valid_T_2), // @[TagArray.scala:131:52] .io_in_1_bits_idx ({2'h0, _arbs_1_io_in_1_bits_idx_T}), // @[TagArray.scala:133:{35,58}] .io_out_ready (_arrays_1_io_read_ready), // @[TagArray.scala:124:43] .io_out_valid (_arbs_1_io_out_valid), .io_out_bits_idx (_arbs_1_io_out_bits_idx), .io_out_bits_way_en (_arbs_1_io_out_bits_way_en), .io_out_bits_tag (_arbs_1_io_out_bits_tag) ); // @[TagArray.scala:125:41] Arbiter2_L1MetaReadReq_2 arbs_2 ( // @[TagArray.scala:125:41] .clock (clock), .reset (reset), .io_in_0_ready (_arbs_2_io_in_0_ready), .io_in_0_valid (_arbs_2_io_in_0_valid_T_2), // @[TagArray.scala:131:52] .io_in_0_bits_idx ({2'h0, _arbs_2_io_in_0_bits_idx_T}), // @[TagArray.scala:133:{35,58}] .io_in_0_bits_tag (io_read_0_bits_tag_0), // @[TagArray.scala:94:7] .io_in_1_ready (_arbs_2_io_in_1_ready), .io_in_1_valid (_arbs_2_io_in_1_valid_T_2), // @[TagArray.scala:131:52] .io_in_1_bits_idx ({2'h0, _arbs_2_io_in_1_bits_idx_T}), // @[TagArray.scala:133:{35,58}] .io_out_ready (_arrays_2_io_read_ready), // @[TagArray.scala:124:43] .io_out_valid (_arbs_2_io_out_valid), .io_out_bits_idx (_arbs_2_io_out_bits_idx), .io_out_bits_way_en (_arbs_2_io_out_bits_way_en), .io_out_bits_tag (_arbs_2_io_out_bits_tag) ); // @[TagArray.scala:125:41] Arbiter2_L1MetaReadReq_3 arbs_3 ( // @[TagArray.scala:125:41] .clock (clock), .reset (reset), .io_in_0_ready (_arbs_3_io_in_0_ready), .io_in_0_valid (_arbs_3_io_in_0_valid_T_2), // @[TagArray.scala:131:52] .io_in_0_bits_idx ({2'h0, _arbs_3_io_in_0_bits_idx_T}), // @[TagArray.scala:133:{35,58}] .io_in_0_bits_tag (io_read_0_bits_tag_0), // @[TagArray.scala:94:7] .io_in_1_ready (_arbs_3_io_in_1_ready), .io_in_1_valid (_arbs_3_io_in_1_valid_T_2), // @[TagArray.scala:131:52] .io_in_1_bits_idx ({2'h0, _arbs_3_io_in_1_bits_idx_T}), // @[TagArray.scala:133:{35,58}] .io_out_ready (_arrays_3_io_read_ready), // @[TagArray.scala:124:43] .io_out_valid (_arbs_3_io_out_valid), .io_out_bits_idx (_arbs_3_io_out_bits_idx), .io_out_bits_way_en (_arbs_3_io_out_bits_way_en), .io_out_bits_tag (_arbs_3_io_out_bits_tag) ); // @[TagArray.scala:125:41] Arbiter2_L1MetaWriteReq write_arbs_0 ( // @[TagArray.scala:126:47] .clock (clock), .reset (reset), .io_in_0_ready (_write_arbs_0_io_in_0_ready), .io_in_0_valid (_write_arbs_0_io_in_0_valid_T_2), // @[TagArray.scala:143:59] .io_in_0_bits_idx ({2'h0, _write_arbs_0_io_in_0_bits_idx_T}), // @[TagArray.scala:145:{41,65}] .io_in_0_bits_way_en (io_write_0_bits_way_en_0), // @[TagArray.scala:94:7] .io_in_0_bits_tag (io_write_0_bits_tag_0), // @[TagArray.scala:94:7] .io_in_0_bits_data_coh_state (io_write_0_bits_data_coh_state_0), // @[TagArray.scala:94:7] 
.io_in_0_bits_data_tag (io_write_0_bits_data_tag_0), // @[TagArray.scala:94:7] .io_in_1_ready (_write_arbs_0_io_in_1_ready), .io_in_1_valid (_write_arbs_0_io_in_1_valid_T_2), // @[TagArray.scala:143:59] .io_in_1_bits_idx ({2'h0, _write_arbs_0_io_in_1_bits_idx_T}), // @[TagArray.scala:145:{41,65}] .io_in_1_bits_way_en (io_write_1_bits_way_en_0), // @[TagArray.scala:94:7] .io_in_1_bits_tag (io_write_1_bits_tag_0), // @[TagArray.scala:94:7] .io_in_1_bits_data_coh_state (io_write_1_bits_data_coh_state_0), // @[TagArray.scala:94:7] .io_in_1_bits_data_tag (io_write_1_bits_data_tag_0), // @[TagArray.scala:94:7] .io_out_ready (_arrays_0_io_write_ready), // @[TagArray.scala:124:43] .io_out_valid (_write_arbs_0_io_out_valid), .io_out_bits_idx (_write_arbs_0_io_out_bits_idx), .io_out_bits_way_en (_write_arbs_0_io_out_bits_way_en), .io_out_bits_tag (_write_arbs_0_io_out_bits_tag), .io_out_bits_data_coh_state (_write_arbs_0_io_out_bits_data_coh_state), .io_out_bits_data_tag (_write_arbs_0_io_out_bits_data_tag) ); // @[TagArray.scala:126:47] Arbiter2_L1MetaWriteReq_1 write_arbs_1 ( // @[TagArray.scala:126:47] .clock (clock), .reset (reset), .io_in_0_ready (_write_arbs_1_io_in_0_ready), .io_in_0_valid (_write_arbs_1_io_in_0_valid_T_2), // @[TagArray.scala:143:59] .io_in_0_bits_idx ({2'h0, _write_arbs_1_io_in_0_bits_idx_T}), // @[TagArray.scala:145:{41,65}] .io_in_0_bits_way_en (io_write_0_bits_way_en_0), // @[TagArray.scala:94:7] .io_in_0_bits_tag (io_write_0_bits_tag_0), // @[TagArray.scala:94:7] .io_in_0_bits_data_coh_state (io_write_0_bits_data_coh_state_0), // @[TagArray.scala:94:7] .io_in_0_bits_data_tag (io_write_0_bits_data_tag_0), // @[TagArray.scala:94:7] .io_in_1_ready (_write_arbs_1_io_in_1_ready), .io_in_1_valid (_write_arbs_1_io_in_1_valid_T_2), // @[TagArray.scala:143:59] .io_in_1_bits_idx ({2'h0, _write_arbs_1_io_in_1_bits_idx_T}), // @[TagArray.scala:145:{41,65}] .io_in_1_bits_way_en (io_write_1_bits_way_en_0), // @[TagArray.scala:94:7] .io_in_1_bits_tag (io_write_1_bits_tag_0), // @[TagArray.scala:94:7] .io_in_1_bits_data_coh_state (io_write_1_bits_data_coh_state_0), // @[TagArray.scala:94:7] .io_in_1_bits_data_tag (io_write_1_bits_data_tag_0), // @[TagArray.scala:94:7] .io_out_ready (_arrays_1_io_write_ready), // @[TagArray.scala:124:43] .io_out_valid (_write_arbs_1_io_out_valid), .io_out_bits_idx (_write_arbs_1_io_out_bits_idx), .io_out_bits_way_en (_write_arbs_1_io_out_bits_way_en), .io_out_bits_tag (_write_arbs_1_io_out_bits_tag), .io_out_bits_data_coh_state (_write_arbs_1_io_out_bits_data_coh_state), .io_out_bits_data_tag (_write_arbs_1_io_out_bits_data_tag) ); // @[TagArray.scala:126:47] Arbiter2_L1MetaWriteReq_2 write_arbs_2 ( // @[TagArray.scala:126:47] .clock (clock), .reset (reset), .io_in_0_ready (_write_arbs_2_io_in_0_ready), .io_in_0_valid (_write_arbs_2_io_in_0_valid_T_2), // @[TagArray.scala:143:59] .io_in_0_bits_idx ({2'h0, _write_arbs_2_io_in_0_bits_idx_T}), // @[TagArray.scala:145:{41,65}] .io_in_0_bits_way_en (io_write_0_bits_way_en_0), // @[TagArray.scala:94:7] .io_in_0_bits_tag (io_write_0_bits_tag_0), // @[TagArray.scala:94:7] .io_in_0_bits_data_coh_state (io_write_0_bits_data_coh_state_0), // @[TagArray.scala:94:7] .io_in_0_bits_data_tag (io_write_0_bits_data_tag_0), // @[TagArray.scala:94:7] .io_in_1_ready (_write_arbs_2_io_in_1_ready), .io_in_1_valid (_write_arbs_2_io_in_1_valid_T_2), // @[TagArray.scala:143:59] .io_in_1_bits_idx ({2'h0, _write_arbs_2_io_in_1_bits_idx_T}), // @[TagArray.scala:145:{41,65}] .io_in_1_bits_way_en (io_write_1_bits_way_en_0), // 
@[TagArray.scala:94:7] .io_in_1_bits_tag (io_write_1_bits_tag_0), // @[TagArray.scala:94:7] .io_in_1_bits_data_coh_state (io_write_1_bits_data_coh_state_0), // @[TagArray.scala:94:7] .io_in_1_bits_data_tag (io_write_1_bits_data_tag_0), // @[TagArray.scala:94:7] .io_out_ready (_arrays_2_io_write_ready), // @[TagArray.scala:124:43] .io_out_valid (_write_arbs_2_io_out_valid), .io_out_bits_idx (_write_arbs_2_io_out_bits_idx), .io_out_bits_way_en (_write_arbs_2_io_out_bits_way_en), .io_out_bits_tag (_write_arbs_2_io_out_bits_tag), .io_out_bits_data_coh_state (_write_arbs_2_io_out_bits_data_coh_state), .io_out_bits_data_tag (_write_arbs_2_io_out_bits_data_tag) ); // @[TagArray.scala:126:47] Arbiter2_L1MetaWriteReq_3 write_arbs_3 ( // @[TagArray.scala:126:47] .clock (clock), .reset (reset), .io_in_0_ready (_write_arbs_3_io_in_0_ready), .io_in_0_valid (_write_arbs_3_io_in_0_valid_T_2), // @[TagArray.scala:143:59] .io_in_0_bits_idx ({2'h0, _write_arbs_3_io_in_0_bits_idx_T}), // @[TagArray.scala:145:{41,65}] .io_in_0_bits_way_en (io_write_0_bits_way_en_0), // @[TagArray.scala:94:7] .io_in_0_bits_tag (io_write_0_bits_tag_0), // @[TagArray.scala:94:7] .io_in_0_bits_data_coh_state (io_write_0_bits_data_coh_state_0), // @[TagArray.scala:94:7] .io_in_0_bits_data_tag (io_write_0_bits_data_tag_0), // @[TagArray.scala:94:7] .io_in_1_ready (_write_arbs_3_io_in_1_ready), .io_in_1_valid (_write_arbs_3_io_in_1_valid_T_2), // @[TagArray.scala:143:59] .io_in_1_bits_idx ({2'h0, _write_arbs_3_io_in_1_bits_idx_T}), // @[TagArray.scala:145:{41,65}] .io_in_1_bits_way_en (io_write_1_bits_way_en_0), // @[TagArray.scala:94:7] .io_in_1_bits_tag (io_write_1_bits_tag_0), // @[TagArray.scala:94:7] .io_in_1_bits_data_coh_state (io_write_1_bits_data_coh_state_0), // @[TagArray.scala:94:7] .io_in_1_bits_data_tag (io_write_1_bits_data_tag_0), // @[TagArray.scala:94:7] .io_out_ready (_arrays_3_io_write_ready), // @[TagArray.scala:124:43] .io_out_valid (_write_arbs_3_io_out_valid), .io_out_bits_idx (_write_arbs_3_io_out_bits_idx), .io_out_bits_way_en (_write_arbs_3_io_out_bits_way_en), .io_out_bits_tag (_write_arbs_3_io_out_bits_tag), .io_out_bits_data_coh_state (_write_arbs_3_io_out_bits_data_coh_state), .io_out_bits_data_tag (_write_arbs_3_io_out_bits_data_tag) ); // @[TagArray.scala:126:47] assign io_read_0_ready = io_read_0_ready_0; // @[TagArray.scala:94:7] assign io_read_1_ready = io_read_1_ready_0; // @[TagArray.scala:94:7] assign io_write_0_ready = io_write_0_ready_0; // @[TagArray.scala:94:7] assign io_write_1_ready = io_write_1_ready_0; // @[TagArray.scala:94:7] assign io_resp_0_0_coh_state = io_resp_0_0_coh_state_0; // @[TagArray.scala:94:7] assign io_resp_0_0_tag = io_resp_0_0_tag_0; // @[TagArray.scala:94:7] assign io_resp_0_1_coh_state = io_resp_0_1_coh_state_0; // @[TagArray.scala:94:7] assign io_resp_0_1_tag = io_resp_0_1_tag_0; // @[TagArray.scala:94:7] assign io_resp_0_2_coh_state = io_resp_0_2_coh_state_0; // @[TagArray.scala:94:7] assign io_resp_0_2_tag = io_resp_0_2_tag_0; // @[TagArray.scala:94:7] assign io_resp_0_3_coh_state = io_resp_0_3_coh_state_0; // @[TagArray.scala:94:7] assign io_resp_0_3_tag = io_resp_0_3_tag_0; // @[TagArray.scala:94:7] assign io_resp_1_0_coh_state = io_resp_1_0_coh_state_0; // @[TagArray.scala:94:7] assign io_resp_1_0_tag = io_resp_1_0_tag_0; // @[TagArray.scala:94:7] assign io_resp_1_1_coh_state = io_resp_1_1_coh_state_0; // @[TagArray.scala:94:7] assign io_resp_1_1_tag = io_resp_1_1_tag_0; // @[TagArray.scala:94:7] assign io_resp_1_2_coh_state = io_resp_1_2_coh_state_0; // 
@[TagArray.scala:94:7] assign io_resp_1_2_tag = io_resp_1_2_tag_0; // @[TagArray.scala:94:7] assign io_resp_1_3_coh_state = io_resp_1_3_coh_state_0; // @[TagArray.scala:94:7] assign io_resp_1_3_tag = io_resp_1_3_tag_0; // @[TagArray.scala:94:7] endmodule
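For readers matching the generated Verilog above against L1MetadataArrayBanked: the low two bits of each 6-bit read index select one of the four banks (the _GEN vectors index the per-bank ready signals), the upper four bits address a set inside that bank, and each port's response is chosen by a one-hot mux driven by a one-cycle-delayed copy of the bank-select bits (the REG / REG_1 registers and the OR-of-conditional terms on io_resp_*). A minimal, hypothetical Chisel sketch of just that selection scheme follows; the module and signal names are illustrative and not part of the source.

import chisel3._
import chisel3.util._

// Hypothetical standalone illustration of the bank-select scheme used by
// L1MetadataArrayBanked with nBanks = 4 and a 6-bit set index.
class BankedRespSelectSketch(nBanks: Int = 4, idxBits: Int = 6, metaBits: Int = 22) extends Module {
  val bankBits = log2Ceil(nBanks)
  val io = IO(new Bundle {
    val idx       = Input(UInt(idxBits.W))                // full set index from the read port
    val bankResps = Input(Vec(nBanks, UInt(metaBits.W)))  // per-bank read data, valid next cycle
    val bankSel   = Output(UInt(bankBits.W))              // which bank services this read
    val setInBank = Output(UInt((idxBits - bankBits).W))  // index presented to that bank
    val resp      = Output(UInt(metaBits.W))
  })
  io.bankSel   := io.idx(bankBits - 1, 0) // low bits choose the bank
  io.setInBank := io.idx >> bankBits      // remaining bits index within the bank
  // The banks' SyncReadMems return data one cycle later, so the response mux
  // uses a registered copy of the bank-select bits.
  io.resp := Mux1H(UIntToOH(RegNext(io.bankSel)), io.bankResps)
}

With four banks this elaborates to essentially the structure seen in the Verilog above: a 2-bit register of the bank index and, for each response field, a sum of four terms masked by the decoded register value.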
Generate the Verilog code corresponding to the following Chisel files. File lsu.scala: //****************************************************************************** // Copyright (c) 2012 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Out-of-Order Load/Store Unit //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // // Load/Store Unit is made up of the Load-Address Queue, the Store-Address // Queue, and the Store-Data queue (LAQ, SAQ, and SDQ). // // Stores are sent to memory at (well, after) commit, loads are executed // optimistically ASAP. If a misspeculation is discovered, the pipeline is // cleared. Loads put to sleep are retried. If a LoadAddr and StoreAddr match, // the Load can receive its data by forwarding data out of the Store-Data // Queue. // // Currently, loads are sent to memory immediately, and in parallel do an // associative search of the SAQ, on entering the LSU. If there is a hit on the SAQ // search, the memory request is killed on the next cycle, and if the SDQ entry // is valid, the store data is forwarded to the load (delayed to match the // load-use delay to deal with the write-port structural hazard). If the store // data is not present, or it's only a partial match (SB->LH), the load is put // to sleep in the LAQ. // // Memory ordering violations are detected by stores at their addr-gen time by // associatively searching the LAQ for newer loads that have been issued to // memory. // // The store queue contains both speculated and committed stores. // // Only one port to memory... loads and stores have to fight for it, West Side // Story style. // // TODO: // - Add predicting structure for ordering failures // - currently won't STD forward if DMEM is busy // - ability to turn off things if VM is disabled // - reconsider port count of the wakeup, retry stuff package boom.v4.lsu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.rocket import freechips.rocketchip.tilelink._ import freechips.rocketchip.util.Str import boom.v4.common._ import boom.v4.exu.{BrUpdateInfo, Exception, CommitSignals, MemGen, ExeUnitResp, Wakeup} import boom.v4.util._ class BoomDCacheReq(implicit p: Parameters) extends BoomBundle()(p) with HasBoomUOP { val addr = UInt(coreMaxAddrBits.W) val data = Bits(coreDataBits.W) val is_hella = Bool() // Is this the hellacache req?
If so this is not tracked in LDQ or STQ } class BoomDCacheResp(implicit p: Parameters) extends BoomBundle()(p) with HasBoomUOP { val data = Bits(coreDataBits.W) val is_hella = Bool() } class LSUDMemIO(implicit p: Parameters, edge: TLEdgeOut) extends BoomBundle()(p) { // In LSU's dmem stage, send the request val req = new DecoupledIO(Vec(lsuWidth, Valid(new BoomDCacheReq))) // In LSU's LCAM search stage, kill if order fail (or forwarding possible) val s1_kill = Output(Vec(lsuWidth, Bool())) // The s1 inflight request will likely get nacked next cycle val s1_nack_advisory = Input(Vec(lsuWidth, Bool())) // Get a request any cycle val resp = Flipped(Vec(lsuWidth, new ValidIO(new BoomDCacheResp))) // The cache irrevocably accepted our store val store_ack = Flipped(Vec(lsuWidth, new ValidIO(new BoomDCacheReq))) // In our response stage, if we get a nack, we need to reexecute val nack = Flipped(Vec(lsuWidth, new ValidIO(new BoomDCacheReq))) val ll_resp = Flipped(new DecoupledIO(new BoomDCacheResp)) val brupdate = Output(new BrUpdateInfo) val exception = Output(Bool()) val rob_pnr_idx = Output(UInt(robAddrSz.W)) val rob_head_idx = Output(UInt(robAddrSz.W)) val release = Flipped(new DecoupledIO(new TLBundleC(edge.bundle))) // Clears prefetching MSHRs val force_order = Output(Bool()) val ordered = Input(Bool()) val perf = Input(new Bundle { val acquire = Bool() val release = Bool() }) } class LSUCoreIO(implicit p: Parameters) extends BoomBundle()(p) { val agen = Flipped(Vec(lsuWidth, Valid(new MemGen))) val dgen = Flipped(Vec(memWidth + 1, Valid(new MemGen))) val iwakeups = Vec(lsuWidth, Valid(new Wakeup)) val iresp = Vec(lsuWidth, Valid(new ExeUnitResp(xLen))) val fresp = Vec(lsuWidth, Valid(new ExeUnitResp(xLen))) val sfence = Flipped(Valid(new rocket.SFenceReq)) val dis_uops = Flipped(Vec(coreWidth, Valid(new MicroOp))) val dis_ldq_idx = Output(Vec(coreWidth, UInt(ldqAddrSz.W))) val dis_stq_idx = Output(Vec(coreWidth, UInt(stqAddrSz.W))) val ldq_full = Output(Vec(coreWidth, Bool())) val stq_full = Output(Vec(coreWidth, Bool())) val commit = Input(new CommitSignals) val commit_load_at_rob_head = Input(Bool()) // Stores clear busy bit when stdata is received // memWidth for incoming addr/datagen, +1 for fp dgen, +1 for clr_head pointer val clr_bsy = Output(Vec(coreWidth, Valid(UInt(robAddrSz.W)))) // Speculatively safe load (barring memory ordering failure) val clr_unsafe = Output(Vec(lsuWidth, Valid(UInt(robAddrSz.W)))) // Tell the DCache to clear prefetches/speculating misses val fence_dmem = Input(Bool()) val brupdate = Input(new BrUpdateInfo) val rob_pnr_idx = Input(UInt(robAddrSz.W)) val rob_head_idx = Input(UInt(robAddrSz.W)) val exception = Input(Bool()) val fencei_rdy = Output(Bool()) val lxcpt = Output(Valid(new Exception)) val tsc_reg = Input(UInt()) val status = Input(new rocket.MStatus) val bp = Input(Vec(nBreakpoints, new rocket.BP)) val mcontext = Input(UInt()) val scontext = Input(UInt()) val perf = Output(new Bundle { val acquire = Bool() val release = Bool() val tlbMiss = Bool() }) } class LSUIO(implicit p: Parameters, edge: TLEdgeOut) extends BoomBundle()(p) { val ptw = new rocket.TLBPTWIO val core = new LSUCoreIO val dmem = new LSUDMemIO val hellacache = Flipped(new freechips.rocketchip.rocket.HellaCacheIO) } class LDQEntry(implicit p: Parameters) extends BoomBundle()(p) with HasBoomUOP { val addr = Valid(UInt(coreMaxAddrBits.W)) val addr_is_virtual = Bool() // Virtual address, we got a TLB miss val addr_is_uncacheable = Bool() // Uncacheable, wait until head of ROB to execute 
val executed = Bool() // load sent to memory, reset by NACKs val succeeded = Bool() val order_fail = Bool() val observed = Bool() val st_dep_mask = UInt(numStqEntries.W) // list of stores older than us val ld_byte_mask = UInt(8.W) val forward_std_val = Bool() val forward_stq_idx = UInt(stqAddrSz.W) // Which store did we get the store-load forward from? val debug_wb_data = UInt(xLen.W) } class STQEntry(implicit p: Parameters) extends BoomBundle()(p) with HasBoomUOP { val addr = Valid(UInt(coreMaxAddrBits.W)) val addr_is_virtual = Bool() // Virtual address, we got a TLB miss val data = Valid(UInt(xLen.W)) val committed = Bool() // committed by ROB val succeeded = Bool() // D$ has ack'd this, we don't need to maintain this anymore val can_execute = Bool() val cleared = Bool() // we've cleared this entry in the ROB val debug_wb_data = UInt(xLen.W) } class LSU(implicit p: Parameters, edge: TLEdgeOut) extends BoomModule()(p) with rocket.HasL1HellaCacheParameters { val io = IO(new LSUIO) //val ldq = Reg(Vec(numLdqEntries, Valid(new LDQEntry))) val ldq_valid = Reg(Vec(numLdqEntries, Bool())) val ldq_uop = Reg(Vec(numLdqEntries, new MicroOp)) val ldq_addr = Reg(Vec(numLdqEntries, Valid(UInt(coreMaxAddrBits.W)))) val ldq_addr_is_virtual = Reg(Vec(numLdqEntries, Bool())) val ldq_addr_is_uncacheable = Reg(Vec(numLdqEntries, Bool())) val ldq_executed = Reg(Vec(numLdqEntries, Bool())) val ldq_succeeded = Reg(Vec(numLdqEntries, Bool())) val ldq_order_fail = Reg(Vec(numLdqEntries, Bool())) val ldq_observed = Reg(Vec(numLdqEntries, Bool())) val ldq_st_dep_mask = Reg(Vec(numLdqEntries, UInt(numStqEntries.W))) val ldq_ld_byte_mask = Reg(Vec(numLdqEntries, UInt(8.W))) val ldq_forward_std_val = Reg(Vec(numLdqEntries, Bool())) val ldq_forward_stq_idx = Reg(Vec(numLdqEntries, UInt(stqAddrSz.W))) val ldq_debug_wb_data = Reg(Vec(numLdqEntries, UInt(xLen.W))) def ldq_read(idx: UInt): Valid[LDQEntry] = { val e = Wire(Valid(new LDQEntry)) e.valid := ldq_valid (idx) e.bits.uop := ldq_uop (idx) e.bits.addr := ldq_addr (idx) e.bits.addr_is_virtual := ldq_addr_is_virtual (idx) e.bits.addr_is_uncacheable := ldq_addr_is_uncacheable(idx) e.bits.executed := ldq_executed (idx) e.bits.succeeded := ldq_succeeded (idx) e.bits.order_fail := ldq_order_fail (idx) e.bits.observed := ldq_observed (idx) e.bits.st_dep_mask := ldq_st_dep_mask (idx) e.bits.ld_byte_mask := ldq_ld_byte_mask (idx) e.bits.forward_std_val := ldq_forward_std_val (idx) e.bits.forward_stq_idx := ldq_forward_stq_idx (idx) e.bits.debug_wb_data := ldq_debug_wb_data (idx) e } //val stq = Reg(Vec(numStqEntries, Valid(new STQEntry))) val stq_valid = Reg(Vec(numStqEntries, Bool())) val stq_uop = Reg(Vec(numStqEntries, new MicroOp)) val stq_addr = Reg(Vec(numStqEntries, Valid(UInt(coreMaxAddrBits.W)))) val stq_addr_is_virtual = Reg(Vec(numStqEntries, Bool())) val stq_data = Reg(Vec(numStqEntries, Valid(UInt(xLen.W)))) val stq_committed = Reg(Vec(numStqEntries, Bool())) val stq_succeeded = Reg(Vec(numStqEntries, Bool())) val stq_can_execute = Reg(Vec(numStqEntries, Bool())) val stq_cleared = Reg(Vec(numStqEntries, Bool())) val stq_debug_wb_data = Reg(Vec(numStqEntries, UInt(xLen.W))) def stq_read(idx: UInt): Valid[STQEntry] = { val e = Wire(Valid(new STQEntry)) e.valid := stq_valid (idx) e.bits.uop := stq_uop (idx) e.bits.addr := stq_addr (idx) e.bits.addr_is_virtual := stq_addr_is_virtual (idx) e.bits.data := stq_data (idx) e.bits.committed := stq_committed (idx) e.bits.succeeded := stq_succeeded (idx) e.bits.can_execute := stq_can_execute (idx) e.bits.cleared := 
stq_cleared (idx) e.bits.debug_wb_data := stq_debug_wb_data (idx) e } val ldq_head = Reg(UInt(ldqAddrSz.W)) val ldq_tail = Reg(UInt(ldqAddrSz.W)) val stq_head = Reg(UInt(stqAddrSz.W)) // point to next store to clear from STQ (i.e., send to memory) val stq_tail = Reg(UInt(stqAddrSz.W)) val stq_commit_head = Reg(UInt(stqAddrSz.W)) // point to next store to commit val stq_execute_head = Reg(UInt(stqAddrSz.W)) // point to next store to execute val h_ready :: h_s1 :: h_s2 :: h_s2_nack :: h_wait :: h_replay :: h_dead :: Nil = Enum(7) // s1 : do TLB, if success and not killed, fire request go to h_s2 // store s1_data to register // if tlb miss, go to s2_nack // if don't get TLB, go to s2_nack // store tlb xcpt // s2 : If kill, go to dead // If tlb xcpt, send tlb xcpt, go to dead // s2_nack : send nack, go to dead // wait : wait for response, if nack, go to replay // replay : refire request, use already translated address // dead : wait for response, ignore it val hella_state = RegInit(h_ready) val hella_req = Reg(new rocket.HellaCacheReq) val hella_data = Reg(new rocket.HellaCacheWriteData) val hella_paddr = Reg(UInt(paddrBits.W)) val hella_xcpt = Reg(new rocket.HellaCacheExceptions) val dtlb = Module(new NBDTLB( instruction = false, lgMaxSize = log2Ceil(coreDataBytes), rocket.TLBConfig(dcacheParams.nTLBSets, dcacheParams.nTLBWays))) io.ptw <> dtlb.io.ptw io.core.perf.tlbMiss := io.ptw.req.fire io.core.perf.acquire := io.dmem.perf.acquire io.core.perf.release := io.dmem.perf.release val clear_store = WireInit(false.B) def widthMap[T <: Data](f: Int => T) = VecInit((0 until lsuWidth).map(f)) // default connection from flip flop to next state val ldq_will_succeed = WireDefault(ldq_succeeded) ldq_succeeded := ldq_will_succeed //------------------------------------------------------------- //------------------------------------------------------------- // Enqueue new entries //------------------------------------------------------------- //------------------------------------------------------------- // This is a newer store than existing loads, so clear the bit in all the store dependency masks for (i <- 0 until numLdqEntries) { when (clear_store) { ldq_st_dep_mask(i) := ldq_st_dep_mask(i) & ~(1.U << stq_head) } } // Decode stage var ld_enq_idx = ldq_tail var st_enq_idx = stq_tail val dis_ldq_oh = Reg(Vec(numLdqEntries, Bool())) val dis_stq_oh = Reg(Vec(numStqEntries, Bool())) dis_ldq_oh := 0.U(numLdqEntries.W).asBools dis_stq_oh := 0.U(numStqEntries.W).asBools val dis_uops = Reg(Vec(coreWidth, Valid(new MicroOp))) val live_store_mask = stq_valid.asUInt | dis_stq_oh.asUInt var next_live_store_mask = Mux(clear_store, live_store_mask & ~(1.U << stq_head), live_store_mask) var ldq_tail_oh = UIntToOH(ldq_tail)(numLdqEntries-1,0) val ldq_valids = ldq_valid.asUInt | dis_ldq_oh.asUInt var stq_tail_oh = UIntToOH(stq_tail)(numStqEntries-1,0) val stq_valids = stq_valid.asUInt | dis_stq_oh.asUInt val stq_nonempty = stq_valid.reduce(_||_) for (w <- 0 until coreWidth) { val ldq_full = (ldq_tail_oh & ldq_valids).orR io.core.ldq_full(w) := ldq_full io.core.dis_ldq_idx(w) := ld_enq_idx val stq_full = (stq_tail_oh & stq_valids).orR io.core.stq_full(w) := stq_full io.core.dis_stq_idx(w) := st_enq_idx val dis_ld_val = io.core.dis_uops(w).valid && io.core.dis_uops(w).bits.uses_ldq && !io.core.dis_uops(w).bits.exception val dis_st_val = io.core.dis_uops(w).valid && io.core.dis_uops(w).bits.uses_stq && !io.core.dis_uops(w).bits.exception dis_uops(w).valid := dis_ld_val || dis_st_val dis_uops(w).bits := 
io.core.dis_uops(w).bits when (dis_ld_val) { dis_ldq_oh(ld_enq_idx) := true.B ldq_st_dep_mask(ld_enq_idx) := next_live_store_mask assert (ld_enq_idx === io.core.dis_uops(w).bits.ldq_idx, "[lsu] mismatch enq load tag.") assert (!ldq_valid(ld_enq_idx), "[lsu] Enqueuing uop is overwriting ldq entries") } when (dis_st_val) { dis_stq_oh(st_enq_idx) := true.B assert (st_enq_idx === io.core.dis_uops(w).bits.stq_idx, "[lsu] mismatch enq store tag.") assert (!stq_valid(st_enq_idx), "[lsu] Enqueuing uop is overwriting stq entries") } when (dis_uops(w).valid && dis_uops(w).bits.uses_ldq) { val ldq_idx = dis_uops(w).bits.ldq_idx ldq_valid (ldq_idx) := !IsKilledByBranch(io.core.brupdate, io.core.exception, dis_uops(w).bits) ldq_uop (ldq_idx) := UpdateBrMask(io.core.brupdate, dis_uops(w).bits) ldq_addr (ldq_idx).valid := false.B ldq_executed (ldq_idx) := false.B ldq_will_succeed (ldq_idx) := false.B ldq_order_fail (ldq_idx) := false.B ldq_observed (ldq_idx) := false.B ldq_forward_std_val(ldq_idx) := false.B } when (dis_uops(w).valid && dis_uops(w).bits.uses_stq) { val stq_idx = dis_uops(w).bits.stq_idx stq_valid (stq_idx) := !IsKilledByBranch(io.core.brupdate, io.core.exception, dis_uops(w).bits) stq_uop (stq_idx) := UpdateBrMask(io.core.brupdate, dis_uops(w).bits) stq_addr (stq_idx).valid := false.B stq_data (stq_idx).valid := false.B stq_committed (stq_idx) := false.B stq_can_execute (stq_idx) := false.B stq_succeeded (stq_idx) := false.B stq_cleared (stq_idx) := false.B } ld_enq_idx = Mux(dis_ld_val, WrapInc(ld_enq_idx, numLdqEntries), ld_enq_idx) next_live_store_mask = Mux(dis_st_val, next_live_store_mask | (1.U << st_enq_idx), next_live_store_mask) st_enq_idx = Mux(dis_st_val, WrapInc(st_enq_idx, numStqEntries), st_enq_idx) assert(!(dis_ld_val && dis_st_val), "A UOP is trying to go into both the LDQ and the STQ") ldq_tail_oh = Mux((io.core.dis_uops(w).bits.uses_ldq && !io.core.dis_uops(w).bits.exception) || !enableCompactingLSUDuringDispatch.B, RotateL1(ldq_tail_oh), ldq_tail_oh) stq_tail_oh = Mux((io.core.dis_uops(w).bits.uses_stq && !io.core.dis_uops(w).bits.exception) || !enableCompactingLSUDuringDispatch.B, RotateL1(stq_tail_oh), stq_tail_oh) } ldq_tail := ld_enq_idx stq_tail := st_enq_idx // If we got a mispredict, the tail will be misaligned for 1 extra cycle assert (io.core.brupdate.b2.mispredict || stq_valid(stq_execute_head) || dis_stq_oh(stq_execute_head) || stq_head === stq_execute_head || stq_tail === stq_execute_head, "stq_execute_head got off track.") io.dmem.force_order := io.core.fence_dmem io.core.fencei_rdy := !stq_nonempty && io.dmem.ordered //------------------------------------------------------------- //------------------------------------------------------------- // Execute stage (access TLB, send requests to Memory) //------------------------------------------------------------- //------------------------------------------------------------- // We can only report 1 exception per cycle. // Just be sure to report the youngest one val mem_xcpt_valid = Wire(Bool()) val mem_xcpt_cause = Wire(UInt()) val mem_xcpt_uop = Wire(new MicroOp) val mem_xcpt_vaddr = Wire(UInt()) //--------------------------------------- // Can-fire logic and wakeup/retry select // // First we determine what operations are waiting to execute. 
// These are the "can_fire"/"will_fire" signals val will_fire_load_agen_exec = Wire(Vec(lsuWidth, Bool())) val will_fire_load_agen = Wire(Vec(lsuWidth, Bool())) val will_fire_store_agen = Wire(Vec(lsuWidth, Bool())) val will_fire_sfence = Wire(Vec(lsuWidth, Bool())) val will_fire_hella_incoming = Wire(Vec(lsuWidth, Bool())) val will_fire_hella_wakeup = Wire(Vec(lsuWidth, Bool())) val will_fire_release = Wire(Vec(lsuWidth, Bool())) val will_fire_load_retry = Wire(Vec(lsuWidth, Bool())) val will_fire_store_retry = Wire(Vec(lsuWidth, Bool())) val will_fire_store_commit_fast = Wire(Vec(lsuWidth, Bool())) val will_fire_store_commit_slow = Wire(Vec(lsuWidth, Bool())) val will_fire_load_wakeup = Wire(Vec(lsuWidth, Bool())) val agen = io.core.agen // ------------------------------- // Assorted signals for scheduling // Don't wakeup a load if we just sent it last cycle or two cycles ago // The block_load_mask may be wrong, but the executing_load mask must be accurate val block_load_mask = WireInit(VecInit((0 until numLdqEntries).map(x=>false.B))) val p1_block_load_mask = RegNext(block_load_mask) val p2_block_load_mask = RegNext(p1_block_load_mask) // Prioritize emptying the store queue when it is almost full val stq_tail_plus = WrapAdd(stq_tail, (2*coreWidth).U, numStqEntries) val stq_almost_full = RegNext(IsOlder(stq_head, stq_tail_plus, stq_tail)) // The store at the commit head needs the DCache to appear ordered // Delay firing load wakeups and retries now val store_needs_order = WireInit(false.B) val ldq_incoming_idx = widthMap(i => agen(i).bits.uop.ldq_idx) val ldq_incoming_e = widthMap(i => WireInit(ldq_read(ldq_incoming_idx(i)))) val stq_incoming_idx = widthMap(i => agen(i).bits.uop.stq_idx) val stq_incoming_e = widthMap(i => WireInit(stq_read(stq_incoming_idx(i)))) val ldq_wakeup_idx = RegNext(AgePriorityEncoder((0 until numLdqEntries).map(i=> { val block = block_load_mask(i) || p1_block_load_mask(i) ldq_addr(i).valid && !ldq_executed(i) && !ldq_succeeded(i) && !ldq_addr_is_virtual(i) && !block }), ldq_head)) val ldq_wakeup_e = WireInit(ldq_read(ldq_wakeup_idx)) // Enqueue into the retry queue val ldq_enq_retry_idx = Reg(UInt(ldqAddrSz.W)) ldq_enq_retry_idx := AgePriorityEncoder((0 until numLdqEntries).map(i => { ldq_addr(i).valid && ldq_addr_is_virtual(i) && (i.U =/= ldq_enq_retry_idx) }), ldq_head) val ldq_enq_retry_e = WireInit(ldq_read(ldq_enq_retry_idx)) val stq_enq_retry_idx = Reg(UInt(stqAddrSz.W)) stq_enq_retry_idx := AgePriorityEncoder((0 until numStqEntries).map(i => { stq_addr(i).valid && stq_addr_is_virtual(i) && (i.U =/= stq_enq_retry_idx) }), stq_commit_head) val stq_enq_retry_e = WireInit(stq_read(stq_enq_retry_idx)) val can_enq_load_retry = (ldq_enq_retry_e.valid && ldq_enq_retry_e.bits.addr.valid && ldq_enq_retry_e.bits.addr_is_virtual) val can_enq_store_retry = (stq_enq_retry_e.valid && stq_enq_retry_e.bits.addr.valid && stq_enq_retry_e.bits.addr_is_virtual) val retry_queue = Module(new BranchKillableQueue(new MemGen, 8)) retry_queue.io.brupdate := io.core.brupdate retry_queue.io.flush := io.core.exception retry_queue.io.enq.valid := can_enq_store_retry || can_enq_load_retry retry_queue.io.enq.bits := DontCare retry_queue.io.enq.bits.data := Mux(can_enq_store_retry, stq_enq_retry_e.bits.addr.bits, ldq_enq_retry_e.bits.addr.bits) retry_queue.io.enq.bits.uop := Mux(can_enq_store_retry, stq_enq_retry_e.bits.uop , ldq_enq_retry_e.bits.uop) retry_queue.io.enq.bits.uop.uses_ldq := can_enq_load_retry && !can_enq_store_retry retry_queue.io.enq.bits.uop.ldq_idx := ldq_enq_retry_idx 
retry_queue.io.enq.bits.uop.uses_stq := can_enq_store_retry retry_queue.io.enq.bits.uop.stq_idx := stq_enq_retry_idx when (can_enq_store_retry && retry_queue.io.enq.fire) { stq_addr(stq_enq_retry_idx).valid := false.B } .elsewhen (can_enq_load_retry && retry_queue.io.enq.fire) { ldq_addr(ldq_enq_retry_idx).valid := false.B } val ldq_retry_idx = retry_queue.io.deq.bits.uop.ldq_idx val stq_retry_idx = retry_queue.io.deq.bits.uop.stq_idx retry_queue.io.deq.ready := will_fire_load_retry.reduce(_||_) || will_fire_store_retry.reduce(_||_) val stq_execute_queue_flush = WireInit(false.B) val stq_execute_queue = withReset(reset.asBool || stq_execute_queue_flush) { Module(new Queue(new STQEntry, 4)) } val stq_commit_e = stq_execute_queue.io.deq val stq_enq_e = WireInit(stq_read(stq_execute_head)) stq_execute_queue.io.enq.bits := stq_enq_e.bits stq_execute_queue.io.enq.bits.uop.stq_idx := stq_execute_head stq_execute_queue.io.deq.ready := will_fire_store_commit_fast.reduce(_||_) || will_fire_store_commit_slow.reduce(_||_) val can_enq_store_execute = ( stq_enq_e.valid && stq_enq_e.bits.addr.valid && stq_enq_e.bits.data.valid && !stq_enq_e.bits.addr_is_virtual && !stq_enq_e.bits.uop.exception && !stq_enq_e.bits.uop.is_fence && (stq_enq_e.bits.committed || stq_enq_e.bits.uop.is_amo) ) stq_execute_queue.io.enq.valid := can_enq_store_execute when (can_enq_store_execute && stq_execute_queue.io.enq.fire) { stq_execute_head := WrapInc(stq_execute_head, numStqEntries) } // ----------------------- // Determine what can fire // Can we fire a incoming load val can_fire_load_agen_exec = widthMap(w => agen(w).valid && agen(w).bits.uop.uses_ldq) val can_fire_load_agen = can_fire_load_agen_exec // Can we fire an incoming store addrgen + store datagen val can_fire_store_agen = widthMap(w => agen(w).valid && agen(w).bits.uop.uses_stq) // Can we fire an incoming sfence val can_fire_sfence = widthMap(w => io.core.sfence.valid) // Can we fire a request from dcache to release a line // This needs to go through LDQ search to mark loads as dangerous val can_fire_release = widthMap(w => (w == lsuWidth-1).B && io.dmem.release.valid) io.dmem.release.ready := will_fire_release.reduce(_||_) // Can we retry a load that missed in the TLB val can_fire_load_retry = widthMap(w => ( retry_queue.io.deq.valid && retry_queue.io.deq.bits.uop.uses_ldq && !RegNext(store_needs_order) && (w == lsuWidth-1).B)) // Can we retry a store addrgen that missed in the TLB val can_fire_store_retry = widthMap(w => ( retry_queue.io.deq.valid && retry_queue.io.deq.bits.uop.uses_stq && (w == lsuWidth-1).B)) // Can we commit a store val can_fire_store_commit_slow = widthMap(w => ( stq_commit_e.valid && !mem_xcpt_valid && (w == 0).B)) val can_fire_store_commit_fast = widthMap(w => can_fire_store_commit_slow(w) && stq_almost_full) // Can we wakeup a load that was nack'd val block_load_wakeup = WireInit(false.B) val can_fire_load_wakeup = widthMap(w => ( ldq_wakeup_e.valid && ldq_wakeup_e.bits.addr.valid && !ldq_wakeup_e.bits.succeeded && !ldq_wakeup_e.bits.addr_is_virtual && !ldq_wakeup_e.bits.executed && !ldq_wakeup_e.bits.order_fail && !p1_block_load_mask(ldq_wakeup_idx) && !p2_block_load_mask(ldq_wakeup_idx) && !RegNext(store_needs_order) && !block_load_wakeup && (w == lsuWidth-1).B && (!ldq_wakeup_e.bits.addr_is_uncacheable || (io.core.commit_load_at_rob_head && ldq_head === ldq_wakeup_idx && ldq_wakeup_e.bits.st_dep_mask.asUInt === 0.U)))) // Can we fire an incoming hellacache request val can_fire_hella_incoming = WireInit(widthMap(w => false.B)) // This 
is assigned to in the hellashim controller
  // Can we fire a hellacache request that the dcache nack'd
  val can_fire_hella_wakeup = WireInit(widthMap(w => false.B)) // This is assigned to in the hellashim controller

  //---------------------------------------------------------
  // Controller logic. Arbitrate which request actually fires

  val exe_tlb_valid = Wire(Vec(lsuWidth, Bool()))
  for (w <- 0 until lsuWidth) {
    var tlb_avail  = true.B
    var dc_avail   = true.B
    var lcam_avail = true.B

    def lsu_sched(can_fire: Bool, uses_tlb:Boolean, uses_dc:Boolean, uses_lcam: Boolean): Bool = {
      val will_fire = can_fire && !(uses_tlb.B && !tlb_avail) &&
                                  !(uses_lcam.B && !lcam_avail) &&
                                  !(uses_dc.B && !dc_avail)
      tlb_avail  = tlb_avail  && !(will_fire && uses_tlb.B)
      lcam_avail = lcam_avail && !(will_fire && uses_lcam.B)
      dc_avail   = dc_avail   && !(will_fire && uses_dc.B)
      dontTouch(will_fire) // dontTouch these so we can inspect the will_fire signals
      will_fire
    }

    // The order of these statements is the priority
    // Some restrictions
    //  - Incoming ops must get precedence, can't backpressure memaddrgen
    //  - Incoming hellacache ops must get precedence over retrying ops (PTW must get precedence over retrying translation)
    // Notes on performance
    //  - Prioritize releases, this speeds up cache line writebacks and refills
    //  - Store commits are lowest priority, since they don't "block" younger instructions unless stq fills up
    will_fire_sfence           (w) := lsu_sched(can_fire_sfence           (w) , true , false, false) // TLB ,    ,
    will_fire_store_commit_fast(w) := lsu_sched(can_fire_store_commit_fast(w) , false, true , false) //     , DC          If store queue is filling up, prioritize draining it
    will_fire_load_agen_exec   (w) := lsu_sched(can_fire_load_agen_exec   (w) , true , true , true ) // TLB , DC , LCAM   Normally fire loads as soon as translation completes
    will_fire_load_agen        (w) := lsu_sched(can_fire_load_agen        (w) , true , false, true ) // TLB ,    , LCAM   If we are draining stores, still translate the loads
    will_fire_store_agen       (w) := lsu_sched(can_fire_store_agen       (w) , true , false, true ) // TLB ,    , LCAM
    will_fire_release          (w) := lsu_sched(can_fire_release          (w) , false, false, true ) //            LCAM
    will_fire_hella_incoming   (w) := lsu_sched(can_fire_hella_incoming   (w) , true , true , false) // TLB , DC
    will_fire_hella_wakeup     (w) := lsu_sched(can_fire_hella_wakeup     (w) , false, true , false) //     , DC
    will_fire_store_retry      (w) := lsu_sched(can_fire_store_retry      (w) , true , false, true ) // TLB ,    , LCAM
    will_fire_load_retry       (w) := lsu_sched(can_fire_load_retry       (w) , true , true , true ) // TLB , DC , LCAM
    will_fire_load_wakeup      (w) := lsu_sched(can_fire_load_wakeup      (w) , false, true , true ) //     , DC , LCAM
    will_fire_store_commit_slow(w) := lsu_sched(can_fire_store_commit_slow(w) , false, true , false) //     , DC

    assert(!(agen(w).valid && !(will_fire_load_agen_exec(w) || will_fire_load_agen(w) || will_fire_store_agen(w))))

    when (will_fire_load_wakeup(w)) {
      block_load_mask(ldq_wakeup_idx) := true.B
    } .elsewhen (will_fire_load_agen(w) || will_fire_load_agen_exec(w)) {
      block_load_mask(agen(w).bits.uop.ldq_idx) := true.B
    } .elsewhen (will_fire_load_retry(w)) {
      block_load_mask(ldq_retry_idx) := true.B
    }
    exe_tlb_valid(w) := !tlb_avail
  }
  assert((lsuWidth == 1).B ||
    (!will_fire_hella_incoming.reduce(_&&_)    &&
     !will_fire_hella_wakeup.reduce(_&&_)      &&
     !will_fire_load_retry.reduce(_&&_)        &&
     !will_fire_store_retry.reduce(_&&_)       &&
     !will_fire_store_commit_fast.reduce(_&&_) &&
     !will_fire_store_commit_slow.reduce(_&&_) &&
     !will_fire_load_wakeup.reduce(_&&_)),
    "Some operations are proceeding down
multiple pipes") require(lsuWidth <= 2) //-------------------------------------------- // TLB Access assert(!(hella_state =/= h_ready && hella_req.cmd === rocket.M_SFENCE), "SFENCE through hella interface not supported") val exe_tlb_uop = widthMap(w => Mux(will_fire_load_agen_exec(w) || will_fire_load_agen (w) , ldq_incoming_e(w).bits.uop, Mux(will_fire_store_agen (w) , stq_incoming_e(w).bits.uop, Mux(will_fire_load_retry (w) || will_fire_store_retry (w) , retry_queue.io.deq.bits.uop, Mux(will_fire_hella_incoming(w) , 0.U.asTypeOf(new MicroOp), 0.U.asTypeOf(new MicroOp)))))) val exe_tlb_vaddr = widthMap(w => Mux(will_fire_load_agen_exec(w) || will_fire_load_agen (w) || will_fire_store_agen (w) , agen(w).bits.data, Mux(will_fire_sfence (w) , io.core.sfence.bits.addr, Mux(will_fire_load_retry (w) || will_fire_store_retry (w) , retry_queue.io.deq.bits.data, Mux(will_fire_hella_incoming(w) , hella_req.addr, 0.U))))) val exe_sfence = io.core.sfence val exe_size = widthMap(w => Mux(will_fire_load_agen_exec(w) || will_fire_load_agen (w) || will_fire_store_agen (w) || will_fire_load_retry (w) || will_fire_store_retry (w) , exe_tlb_uop(w).mem_size, Mux(will_fire_hella_incoming(w) , hella_req.size, 0.U))) val exe_cmd = widthMap(w => Mux(will_fire_load_agen_exec(w) || will_fire_load_agen (w) || will_fire_store_agen (w) || will_fire_load_retry (w) || will_fire_store_retry (w) , exe_tlb_uop(w).mem_cmd, Mux(will_fire_hella_incoming(w) , hella_req.cmd, Mux(will_fire_sfence (w) , rocket.M_SFENCE, 0.U)))) val exe_passthr= widthMap(w => Mux(will_fire_hella_incoming(w) , hella_req.phys, false.B)) val exe_kill = widthMap(w => Mux(will_fire_hella_incoming(w) , io.hellacache.s1_kill, false.B)) val bkptu = Seq.fill(lsuWidth) { Module(new rocket.BreakpointUnit(nBreakpoints)) } for (w <- 0 until lsuWidth) { dtlb.io.req(w).valid := exe_tlb_valid(w) dtlb.io.req(w).bits.vaddr := exe_tlb_vaddr(w) dtlb.io.req(w).bits.size := exe_size(w) dtlb.io.req(w).bits.cmd := exe_cmd(w) dtlb.io.req(w).bits.passthrough := exe_passthr(w) dtlb.io.req(w).bits.prv := io.ptw.status.prv dtlb.io.req(w).bits.v := io.ptw.status.v bkptu(w).io.status := io.core.status bkptu(w).io.bp := io.core.bp bkptu(w).io.pc := DontCare bkptu(w).io.ea := exe_tlb_vaddr(w) bkptu(w).io.mcontext := io.core.mcontext bkptu(w).io.scontext := io.core.scontext } dtlb.io.kill := exe_kill.reduce(_||_) dtlb.io.sfence := exe_sfence // exceptions val ma_ld = widthMap(w => dtlb.io.resp(w).ma.ld && exe_tlb_uop(w).uses_ldq) val ma_st = widthMap(w => dtlb.io.resp(w).ma.st && exe_tlb_uop(w).uses_stq && !exe_tlb_uop(w).is_fence) val pf_ld = widthMap(w => dtlb.io.resp(w).pf.ld && exe_tlb_uop(w).uses_ldq) val pf_st = widthMap(w => dtlb.io.resp(w).pf.st && exe_tlb_uop(w).uses_stq) val ae_ld = widthMap(w => dtlb.io.resp(w).ae.ld && exe_tlb_uop(w).uses_ldq) val ae_st = widthMap(w => dtlb.io.resp(w).ae.st && exe_tlb_uop(w).uses_stq) val dbg_bp = widthMap(w => bkptu(w).io.debug_st && ((exe_tlb_uop(w).uses_ldq && bkptu(w).io.debug_ld) || (exe_tlb_uop(w).uses_stq && bkptu(w).io.debug_st && !exe_tlb_uop(w).is_fence))) val bp = widthMap(w => bkptu(w).io.debug_st && ((exe_tlb_uop(w).uses_ldq && bkptu(w).io.xcpt_ld) || (exe_tlb_uop(w).uses_stq && bkptu(w).io.xcpt_st && !exe_tlb_uop(w).is_fence))) // TODO check for xcpt_if and verify that never happens on non-speculative instructions. 
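  // Annotation (illustrative sketch, not part of the BOOM source): the exception cause
  // that gets registered just below is picked by a simple priority chain over the per-port
  // fault signals (debug trigger, then breakpoint, then misalignment, then page fault,
  // then access fault). A plain-Scala model of "first matching predicate wins", with a
  // made-up name, purely for intuition:
  def firstCauseModel(preds: Seq[(Boolean, Int)]): Int =
    preds.collectFirst { case (hit, cause) if hit => cause }.getOrElse(0)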
val mem_xcpt_valids = RegNext(widthMap(w => exe_tlb_valid(w) && (pf_ld(w) || pf_st(w) || ae_ld(w) || ae_st(w) || ma_ld(w) || ma_st(w) || dbg_bp(w) || bp(w)) && !IsKilledByBranch(io.core.brupdate, io.core.exception, exe_tlb_uop(w)))) val mem_xcpt_uops = RegNext(widthMap(w => UpdateBrMask(io.core.brupdate, exe_tlb_uop(w)))) val mem_xcpt_causes = RegNext(widthMap(w => Mux(dbg_bp(w), rocket.CSR.debugTriggerCause.U, Mux(bp (w), rocket.Causes.breakpoint.U, Mux(ma_st (w), rocket.Causes.misaligned_store.U, Mux(ma_ld (w), rocket.Causes.misaligned_load.U, Mux(pf_st (w), rocket.Causes.store_page_fault.U, Mux(pf_ld (w), rocket.Causes.load_page_fault.U, Mux(ae_st (w), rocket.Causes.store_access.U, Mux(ae_ld (w), rocket.Causes.load_access.U, 0.U)))))))))) val mem_xcpt_vaddrs = RegNext(exe_tlb_vaddr) for (w <- 0 until lsuWidth) { assert (!(dtlb.io.req(w).valid && exe_tlb_uop(w).is_fence), "Fence is pretending to talk to the TLB") } mem_xcpt_valid := mem_xcpt_valids.reduce(_||_) mem_xcpt_cause := mem_xcpt_causes(0) mem_xcpt_uop := mem_xcpt_uops(0) mem_xcpt_vaddr := mem_xcpt_vaddrs(0) var xcpt_found = mem_xcpt_valids(0) var oldest_xcpt_rob_idx = mem_xcpt_uops(0).rob_idx for (w <- 1 until lsuWidth) { val is_older = WireInit(false.B) when (mem_xcpt_valids(w) && (IsOlder(mem_xcpt_uops(w).rob_idx, oldest_xcpt_rob_idx, io.core.rob_head_idx) || !xcpt_found)) { is_older := true.B mem_xcpt_cause := mem_xcpt_causes(w) mem_xcpt_uop := mem_xcpt_uops(w) mem_xcpt_vaddr := mem_xcpt_vaddrs(w) } xcpt_found = xcpt_found || mem_xcpt_valids(w) oldest_xcpt_rob_idx = Mux(is_older, mem_xcpt_uops(w).rob_idx, oldest_xcpt_rob_idx) } val exe_tlb_miss = widthMap(w => dtlb.io.req(w).valid && (dtlb.io.resp(w).miss || !dtlb.io.req(w).ready)) val exe_tlb_paddr = widthMap(w => Cat(dtlb.io.resp(w).paddr(paddrBits-1,corePgIdxBits), exe_tlb_vaddr(w)(corePgIdxBits-1,0))) val exe_tlb_uncacheable = widthMap(w => !(dtlb.io.resp(w).cacheable)) for (w <- 0 until lsuWidth) { assert (exe_tlb_paddr(w) === dtlb.io.resp(w).paddr, "[lsu] paddrs should match.") when (mem_xcpt_valids(w)) { assert(RegNext(will_fire_load_agen_exec(w) || will_fire_load_agen(w) || will_fire_store_agen(w) || will_fire_load_retry(w) || will_fire_store_retry(w))) // Technically only faulting AMOs need this assert(mem_xcpt_uops(w).uses_ldq ^ mem_xcpt_uops(w).uses_stq) when (mem_xcpt_uops(w).uses_ldq) { ldq_uop(mem_xcpt_uops(w).ldq_idx).exception := true.B } .otherwise { stq_uop(mem_xcpt_uops(w).stq_idx).exception := true.B } } } val exe_agen_killed = widthMap(w => IsKilledByBranch(io.core.brupdate, io.core.exception, agen(w).bits.uop)) //------------------------------ // Issue Someting to Memory // // A memory op can come from many different places // The address either was freshly translated, or we are // reading a physical address from the LDQ,STQ, or the HellaCache adapter // defaults io.dmem.brupdate := io.core.brupdate io.dmem.exception := io.core.exception io.dmem.rob_head_idx := io.core.rob_head_idx io.dmem.rob_pnr_idx := io.core.rob_pnr_idx val dmem_req = Wire(Vec(lsuWidth, Valid(new BoomDCacheReq))) io.dmem.req.valid := dmem_req.map(_.valid).reduce(_||_) io.dmem.req.bits := dmem_req val dmem_req_fire = widthMap(w => dmem_req(w).valid && io.dmem.req.fire) val s0_executing_loads = WireInit(VecInit((0 until numLdqEntries).map(x=>false.B))) val s0_kills = Wire(Vec(lsuWidth, Bool())) for (w <- 0 until lsuWidth) { dmem_req(w).valid := false.B dmem_req(w).bits.uop := NullMicroOp dmem_req(w).bits.addr := 0.U dmem_req(w).bits.data := 0.U dmem_req(w).bits.is_hella := false.B 
s0_kills(w) := false.B io.dmem.s1_kill(w) := RegNext(s0_kills(w) && dmem_req_fire(w)) when (will_fire_load_agen_exec(w)) { dmem_req(w).valid := true.B dmem_req(w).bits.addr := exe_tlb_paddr(w) dmem_req(w).bits.uop := exe_tlb_uop(w) s0_kills(w) := exe_tlb_miss(w) || exe_tlb_uncacheable(w) || ma_ld(w) || ae_ld(w) || pf_ld(w) s0_executing_loads(ldq_incoming_idx(w)) := dmem_req_fire(w) && !s0_kills(w) assert(!ldq_incoming_e(w).bits.executed) } .elsewhen (will_fire_load_retry(w)) { dmem_req(w).valid := true.B dmem_req(w).bits.addr := exe_tlb_paddr(w) dmem_req(w).bits.uop := exe_tlb_uop(w) s0_kills(w) := exe_tlb_miss(w) || exe_tlb_uncacheable(w) || ma_ld(w) || ae_ld(w) || pf_ld(w) s0_executing_loads(ldq_retry_idx) := dmem_req_fire(w) && !s0_kills(w) } .elsewhen (will_fire_store_commit_slow(w) || will_fire_store_commit_fast(w)) { dmem_req(w).valid := true.B dmem_req(w).bits.addr := stq_commit_e.bits.addr.bits dmem_req(w).bits.data := (new freechips.rocketchip.rocket.StoreGen( stq_commit_e.bits.uop.mem_size, 0.U, stq_commit_e.bits.data.bits, coreDataBytes)).data dmem_req(w).bits.uop := stq_commit_e.bits.uop when (!dmem_req_fire(w)) { stq_execute_queue_flush := true.B stq_execute_head := stq_commit_e.bits.uop.stq_idx } stq_succeeded(stq_commit_e.bits.uop.stq_idx) := false.B } .elsewhen (will_fire_load_wakeup(w)) { dmem_req(w).valid := true.B dmem_req(w).bits.addr := ldq_wakeup_e.bits.addr.bits dmem_req(w).bits.uop := ldq_wakeup_e.bits.uop s0_executing_loads(ldq_wakeup_idx) := dmem_req_fire(w) assert(!ldq_wakeup_e.bits.executed && !ldq_wakeup_e.bits.addr_is_virtual) } .elsewhen (will_fire_hella_incoming(w)) { assert(hella_state === h_s1) dmem_req(w).valid := !io.hellacache.s1_kill && (!exe_tlb_miss(w) || hella_req.phys) dmem_req(w).bits.addr := exe_tlb_paddr(w) dmem_req(w).bits.data := (new freechips.rocketchip.rocket.StoreGen( hella_req.size, 0.U, io.hellacache.s1_data.data, coreDataBytes)).data dmem_req(w).bits.uop.mem_cmd := hella_req.cmd dmem_req(w).bits.uop.mem_size := hella_req.size dmem_req(w).bits.uop.mem_signed := hella_req.signed dmem_req(w).bits.is_hella := true.B hella_paddr := exe_tlb_paddr(w) } .elsewhen (will_fire_hella_wakeup(w)) { assert(hella_state === h_replay) dmem_req(w).valid := true.B dmem_req(w).bits.addr := hella_paddr dmem_req(w).bits.data := (new freechips.rocketchip.rocket.StoreGen( hella_req.size, 0.U, hella_data.data, coreDataBytes)).data dmem_req(w).bits.uop.mem_cmd := hella_req.cmd dmem_req(w).bits.uop.mem_size := hella_req.size dmem_req(w).bits.uop.mem_signed := hella_req.signed dmem_req(w).bits.is_hella := true.B } //------------------------------------------------------------- // Write Addr into the LAQ/SAQ when (will_fire_load_agen(w) || will_fire_load_agen_exec(w) || will_fire_load_retry(w)) { val ldq_idx = Mux(will_fire_load_agen(w) || will_fire_load_agen_exec(w), ldq_incoming_idx(w), ldq_retry_idx) ldq_addr (ldq_idx).valid := !exe_agen_killed(w) || will_fire_load_retry(w) ldq_addr (ldq_idx).bits := Mux(exe_tlb_miss(w), exe_tlb_vaddr(w), exe_tlb_paddr(w)) ldq_ld_byte_mask (ldq_idx) := GenByteMask(exe_tlb_vaddr(w), exe_tlb_uop(w).mem_size) ldq_uop (ldq_idx).pdst := exe_tlb_uop(w).pdst ldq_addr_is_virtual (ldq_idx) := exe_tlb_miss(w) ldq_addr_is_uncacheable(ldq_idx) := exe_tlb_uncacheable(w) && !exe_tlb_miss(w) assert(!ldq_addr(ldq_idx).valid, "[lsu] Translating load is overwriting a valid address") } when (will_fire_store_agen(w) || will_fire_store_retry(w)) { val stq_idx = Mux(will_fire_store_agen(w), stq_incoming_idx(w), stq_retry_idx) stq_addr (stq_idx).valid 
:= (!exe_agen_killed(w) || will_fire_store_retry(w)) && !pf_st(w) // Prevent AMOs from executing! stq_addr (stq_idx).bits := Mux(exe_tlb_miss(w), exe_tlb_vaddr(w), exe_tlb_paddr(w)) stq_uop (stq_idx).pdst := exe_tlb_uop(w).pdst // Needed for AMOs stq_addr_is_virtual(stq_idx) := exe_tlb_miss(w) assert(!stq_addr(stq_idx).valid, "[lsu] Translating store is overwriting a valid address") } } //------------------------------------------------------------- // Write data into the STQ for (dgen <- io.core.dgen) { when (dgen.valid) { val sidx = dgen.bits.uop.stq_idx //val stq_e = WireInit(stq(sidx)) stq_data(sidx).valid := true.B stq_data(sidx).bits := dgen.bits.data assert(!stq_data(sidx).valid || (stq_data(sidx).bits === dgen.bits.data)) } } require (xLen >= fLen) // for correct SDQ size //------------------------------------------------------------- // Speculative wakeup if loadUseDelay == 4 val wakeupArbs = Seq.fill(lsuWidth) { Module(new Arbiter(new Wakeup, 2)) } for (w <- 0 until lsuWidth) { wakeupArbs(w).io.out.ready := true.B wakeupArbs(w).io.in := DontCare io.core.iwakeups(w) := wakeupArbs(w).io.out if (enableFastLoadUse) { wakeupArbs(w).io.in(1).valid := (will_fire_load_agen_exec(w) && dmem_req_fire(w) && !agen(w).bits.uop.fp_val) wakeupArbs(w).io.in(1).bits.uop := agen(w).bits.uop wakeupArbs(w).io.in(1).bits.bypassable := true.B wakeupArbs(w).io.in(1).bits.speculative_mask := 0.U wakeupArbs(w).io.in(1).bits.rebusy := false.B } } //------------------------------------------------------------- //------------------------------------------------------------- // Cache Access Cycle (Mem) //------------------------------------------------------------- //------------------------------------------------------------- // Note the DCache may not have accepted our request val fired_load_agen_exec = widthMap(w => RegNext(will_fire_load_agen_exec(w) && !exe_agen_killed(w))) val fired_load_agen = widthMap(w => RegNext(will_fire_load_agen (w) && !exe_agen_killed(w))) val fired_store_agen = widthMap(w => RegNext(will_fire_store_agen (w) && !exe_agen_killed(w))) val fired_sfence = RegNext(will_fire_sfence) val fired_release = RegNext(will_fire_release) val fired_load_retry = widthMap(w => RegNext(will_fire_load_retry (w) && !IsKilledByBranch(io.core.brupdate, io.core.exception, retry_queue.io.deq.bits.uop))) val fired_store_retry = widthMap(w => RegNext(will_fire_store_retry (w) && !IsKilledByBranch(io.core.brupdate, io.core.exception, retry_queue.io.deq.bits.uop))) val fired_store_commit = widthMap(w => RegNext(will_fire_store_commit_slow(w) || will_fire_store_commit_fast(w))) val fired_load_wakeup = widthMap(w => RegNext(will_fire_load_wakeup (w) && !IsKilledByBranch(io.core.brupdate, io.core.exception, ldq_wakeup_e.bits.uop))) val fired_hella_incoming = RegNext(will_fire_hella_incoming) val fired_hella_wakeup = RegNext(will_fire_hella_wakeup) val mem_incoming_uop = RegNext(widthMap(w => UpdateBrMask(io.core.brupdate, agen(w).bits.uop))) val mem_ldq_incoming_e = RegNext(widthMap(w => UpdateBrMask(io.core.brupdate, io.core.exception, ldq_incoming_e(w)))) val mem_stq_incoming_e = RegNext(widthMap(w => UpdateBrMask(io.core.brupdate, io.core.exception, stq_incoming_e(w)))) val mem_ldq_wakeup_e = RegNext(UpdateBrMask(io.core.brupdate, io.core.exception, ldq_wakeup_e)) val mem_ldq_retry_e = RegNext(UpdateBrMask(io.core.brupdate, io.core.exception, ldq_read(ldq_retry_idx))) val mem_stq_retry_e = RegNext(UpdateBrMask(io.core.brupdate, io.core.exception, stq_read(stq_retry_idx))) val mem_ldq_e = widthMap(w => 
Mux(fired_load_agen (w) || fired_load_agen_exec(w), mem_ldq_incoming_e(w), Mux(fired_load_retry (w), mem_ldq_retry_e, Mux(fired_load_wakeup (w), mem_ldq_wakeup_e, (0.U).asTypeOf(Valid(new LDQEntry)))))) val mem_stq_e = widthMap(w => Mux(fired_store_agen (w), mem_stq_incoming_e(w), Mux(fired_store_retry(w), mem_stq_retry_e, (0.U).asTypeOf(Valid(new STQEntry))))) val mem_tlb_miss = RegNext(exe_tlb_miss) val mem_tlb_uncacheable = RegNext(exe_tlb_uncacheable) val mem_paddr = RegNext(widthMap(w => dmem_req(w).bits.addr)) // Task 1: Clr ROB busy bit val stq_clr_head_idx = RegNext(AgePriorityEncoder((0 until numStqEntries).map(i => { stq_valid(i) && !stq_cleared(i) }), stq_commit_head)) var clr_idx = stq_clr_head_idx for (i <- 0 until coreWidth) { // Delay store clearing 1 extra cycle, as the store clear can't occur // too quickly before an order fail caused by this store propagates to ROB val clr_valid = Reg(Bool()) clr_valid := false.B val clr_uop = Reg(new MicroOp) val clr_valid_1 = RegNext(clr_valid && !IsKilledByBranch(io.core.brupdate, io.core.exception, clr_uop)) val clr_uop_1 = RegNext(UpdateBrMask(io.core.brupdate, clr_uop)) //val stq_e = WireInit(stq(clr_idx)) val s_uop = WireInit(stq_uop(clr_idx)) when ( stq_valid (clr_idx) && stq_addr (clr_idx).valid && stq_data (clr_idx).valid && !stq_addr_is_virtual (clr_idx) && !s_uop.is_amo && !stq_cleared (clr_idx) && !IsKilledByBranch(io.core.brupdate, io.core.exception, s_uop)) { clr_valid := true.B clr_uop := UpdateBrMask(io.core.brupdate, s_uop) stq_cleared(clr_idx) := true.B } io.core.clr_bsy(i).valid := clr_valid_1 && !IsKilledByBranch(io.core.brupdate, io.core.exception, clr_uop_1) io.core.clr_bsy(i).bits := clr_uop_1.rob_idx clr_idx = WrapInc(clr_idx, numStqEntries) } // Task 2: Do LD-LD. ST-LD searches for ordering failures // Do LD-ST search for forwarding opportunities // We have the opportunity to kill a request we sent last cycle. Use it wisely! 
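  // Annotation (illustrative sketch, not part of the BOOM source): the searches below work
  // at dword granularity and compare byte masks built by GenByteMask from the address and
  // the access size. A plain-Scala model of such a mask, assuming the access stays within
  // one aligned dword and that size is log2(bytes) as in uop.mem_size; the name is made up:
  def byteMaskModel(addr: Long, sizeLog2: Int): Int = {
    val bytes  = 1 << sizeLog2          // 1, 2, 4 or 8 bytes
    val offset = (addr & 7L).toInt      // byte position within the aligned dword
    ((1 << bytes) - 1) << offset        // contiguous run of `bytes` ones, shifted into place
  }
  // Two accesses conflict when their masks overlap ((maskA & maskB) != 0); a store can fully
  // forward to a load only when its write mask covers every byte the load reads, which is
  // the distinction the address-match vs forward-match logic below draws.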
// We translated a store last cycle val do_st_search = widthMap(w => (fired_store_agen(w) || fired_store_retry(w)) && !mem_tlb_miss(w)) // We translated a load last cycle val do_ld_search = widthMap(w => ((fired_load_agen(w) || fired_load_agen_exec(w) || fired_load_retry(w)) && !mem_tlb_miss(w)) || fired_load_wakeup(w)) // We are making a local line visible to other harts val do_release_search = widthMap(w => fired_release(w)) // Store addrs don't go to memory yet, get it from the TLB response // Load wakeups don't go through TLB, get it through memory // Load incoming and load retries go through both val lcam_addr = widthMap(w => Mux(fired_store_agen(w) || fired_store_retry(w) || fired_load_agen(w) || fired_load_agen_exec(w), RegNext(exe_tlb_paddr(w)), Mux(fired_release(w), RegNext(io.dmem.release.bits.address), mem_paddr(w)))) val lcam_uop = widthMap(w => Mux(do_st_search(w), mem_stq_e(w).bits.uop, Mux(do_ld_search(w), mem_ldq_e(w).bits.uop, NullMicroOp))) val lcam_mask = widthMap(w => GenByteMask(lcam_addr(w), lcam_uop(w).mem_size)) val lcam_st_dep_mask = widthMap(w => mem_ldq_e(w).bits.st_dep_mask) val lcam_is_release = widthMap(w => fired_release(w)) val lcam_ldq_idx = widthMap(w => Mux(fired_load_agen (w) || fired_load_agen_exec(w) , mem_incoming_uop(w).ldq_idx, Mux(fired_load_wakeup (w), RegNext(ldq_wakeup_idx), Mux(fired_load_retry (w), RegNext(ldq_retry_idx), 0.U)))) val lcam_stq_idx = widthMap(w => Mux(fired_store_agen (w), mem_incoming_uop(w).stq_idx, Mux(fired_store_retry(w), RegNext(stq_retry_idx), 0.U))) val lcam_younger_load_mask = widthMap(w => IsYoungerMask(lcam_ldq_idx(w), ldq_head, numLdqEntries)) val can_forward = widthMap(w => Mux(fired_load_agen(w) || fired_load_agen_exec(w) || fired_load_retry(w), !mem_tlb_uncacheable(w), !mem_ldq_wakeup_e.bits.addr_is_uncacheable )) val kill_forward = WireInit(widthMap(w => false.B)) // Mask of stores which we conflict on address with val ldst_addr_matches = Wire(Vec(lsuWidth, UInt(numStqEntries.W))) // Mask of stores which we can forward from val ldst_forward_matches = Wire(Vec(lsuWidth, UInt(numStqEntries.W))) // Mask of stores we can forward to val stld_prs2_matches = Wire(Vec(lsuWidth, UInt(numStqEntries.W))) val s1_executing_loads = RegNext(s0_executing_loads) val s1_set_execute = WireInit(s1_executing_loads) val wb_ldst_forward_matches = RegNext(ldst_forward_matches) val wb_ldst_forward_valid = Wire(Vec(lsuWidth, Bool())) val wb_ldst_forward_e = widthMap(w => RegNext(UpdateBrMask(io.core.brupdate, ldq_read(lcam_ldq_idx(w)).bits))) val wb_ldst_forward_ldq_idx = RegNext(lcam_ldq_idx) val wb_ldst_forward_ld_addr = RegNext(lcam_addr) val wb_ldst_forward_stq_idx = Wire(Vec(lsuWidth, UInt(stqAddrSz.W))) val failed_load = WireInit(false.B) for (i <- 0 until numLdqEntries) { // val l_bits = ldq(i).bits val l_valid = ldq_valid(i) val l_addr = ldq_addr(i) val l_addr_is_virtual = ldq_addr_is_virtual(i) val l_executed = ldq_executed(i) val l_succeeded = ldq_succeeded(i) val l_will_succeed = ldq_will_succeed(i) val l_observed = ldq_observed(i) val l_mask = ldq_ld_byte_mask(i) val l_st_dep_mask = ldq_st_dep_mask(i) val l_uop = WireInit(ldq_uop(i)) val l_forward_std_val = ldq_forward_std_val(i) val l_forward_stq_idx = ldq_forward_stq_idx(i) val block_addr_matches = widthMap(w => lcam_addr(w) >> blockOffBits === l_addr.bits >> blockOffBits) val dword_addr_matches = widthMap(w => block_addr_matches(w) && lcam_addr(w)(blockOffBits-1,3) === l_addr.bits(blockOffBits-1,3)) val mask_match = widthMap(w => (l_mask & lcam_mask(w)) === l_mask) val 
mask_overlap = widthMap(w => (l_mask & lcam_mask(w)).orR) // Searcher is a store for (w <- 0 until lsuWidth) { when ( do_release_search(w) && l_valid && l_addr.valid && !l_addr_is_virtual && block_addr_matches(w)) { // This load has been observed, so if a younger load to the same address has not // executed yet, this load must be squashed ldq_observed(i) := true.B } when ( do_st_search(w) && l_valid && l_addr.valid && (l_executed || l_succeeded) && !l_addr_is_virtual && l_st_dep_mask(lcam_stq_idx(w)) && dword_addr_matches(w) && mask_overlap(w)) { val forwarded_is_older = IsOlder(l_forward_stq_idx, lcam_stq_idx(w), l_uop.stq_idx) // We are older than this load, which overlapped us. when (!l_forward_std_val || // If the load wasn't forwarded, it definitely failed ((l_forward_stq_idx =/= lcam_stq_idx(w)) && forwarded_is_older)) { // If the load forwarded from us, we might be ok ldq_order_fail(i) := true.B failed_load := true.B } } when ( do_ld_search(w) && l_valid && l_addr.valid && !l_addr_is_virtual && dword_addr_matches(w) && mask_overlap(w)) { val searcher_is_older = lcam_younger_load_mask(w)(i) assert(IsOlder(lcam_ldq_idx(w), i.U, ldq_head) === searcher_is_older) when (searcher_is_older) { when ((l_executed || l_succeeded) && !s1_executing_loads(i) && // If the load is proceeding in parallel we don't need to kill it l_observed) { // Its only a ordering failure if the cache line was observed between the younger load and us //ldq_order_fail(i) := true.B // failed_load := true.B // this case is no longer possible where an older search gets replayed and found that a younger search has already been released. Note that this is stronger than rvwmo. assert(false.B) } } .elsewhen (lcam_ldq_idx(w) =/= i.U) { // The load is older, and it wasn't executed // we need to kill ourselves, and prevent forwarding // can also forward from the next cycle when (!(l_executed && (l_succeeded || l_will_succeed))) { s1_set_execute(lcam_ldq_idx(w)) := false.B when (RegNext(dmem_req_fire(w) && !s0_kills(w)) && !fired_load_agen(w)) { io.dmem.s1_kill(w) := true.B } kill_forward(w) := true.B } } } } } for (wi <- 0 until lsuWidth) { for (w <- 0 until lsuWidth) { // A younger load might find a older nacking load // this is not the full case, need also to account for the situation where the older load already went to sleep above val nack_dword_addr_matches = (lcam_addr(w) >> 3) === (io.dmem.nack(wi).bits.addr >> 3) val nack_mask = GenByteMask(io.dmem.nack(wi).bits.addr, io.dmem.nack(wi).bits.uop.mem_size) val nack_mask_overlap = (nack_mask & lcam_mask(w)) =/= 0.U when (do_ld_search(w) && io.dmem.nack(wi).valid && io.dmem.nack(wi).bits.uop.uses_ldq && nack_dword_addr_matches && nack_mask_overlap && IsOlder(io.dmem.nack(wi).bits.uop.ldq_idx, lcam_ldq_idx(w), ldq_head)) { s1_set_execute(lcam_ldq_idx(w)) := false.B when (RegNext(dmem_req_fire(w) && !s0_kills(w)) && !fired_load_agen(w)) { io.dmem.s1_kill(w) := true.B } kill_forward(w) := true.B } // A older load might find a younger forwarding load val forward_dword_addr_matches = (lcam_addr(w) >> 3 === wb_ldst_forward_ld_addr(wi) >> 3) val forward_mask = GenByteMask(wb_ldst_forward_ld_addr(wi), wb_ldst_forward_e(wi).uop.mem_size) val forward_mask_overlap = (forward_mask & lcam_mask(w)) =/= 0.U when (do_ld_search(w) && wb_ldst_forward_valid(wi) && forward_dword_addr_matches && forward_mask_overlap && wb_ldst_forward_e(wi).observed && IsOlder(lcam_ldq_idx(w), wb_ldst_forward_ldq_idx(wi), ldq_head)) { ldq_order_fail(wb_ldst_forward_ldq_idx(wi)) := true.B failed_load := true.B } 
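      // Annotation (hedged reading, not original commentary): the cross-lane checks in this
      // loop exist because nacks and same-cycle store->load forwards are in-flight state
      // that is not yet reflected in the LDQ flags, so each searching lane w is compared
      // directly against every nack/forward lane wi.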
// A older store might find a younger load which is forwarding from the wrong place val forwarded_is_older = IsOlder(wb_ldst_forward_stq_idx(wi), lcam_stq_idx(w), wb_ldst_forward_e(wi).uop.stq_idx) when (do_st_search(w) && wb_ldst_forward_valid(wi) && forward_dword_addr_matches && forward_mask_overlap && wb_ldst_forward_e(wi).st_dep_mask(lcam_stq_idx(w)) && forwarded_is_older) { ldq_order_fail(wb_ldst_forward_ldq_idx(wi)) := true.B failed_load := true.B } } } val addr_matches = Wire(Vec(lsuWidth, Vec(numStqEntries, Bool()))) val forward_matches = Wire(Vec(lsuWidth, Vec(numStqEntries, Bool()))) val prs2_matches = Wire(Vec(lsuWidth, Vec(numStqEntries, Bool()))) for (i <- 0 until numStqEntries) { //val s_e = WireInit(stq(i)) val s_addr = stq_addr(i) val s_data = stq_data(i) val s_addr_is_virtual = stq_addr_is_virtual(i) val s_uop = WireInit(stq_uop(i)) val write_mask = GenByteMask(s_addr.bits, s_uop.mem_size) for (w <- 0 until lsuWidth) { val dword_addr_matches = ( s_addr.valid && !s_uop.is_amo && !s_addr_is_virtual && (s_addr.bits(corePAddrBits-1,3) === lcam_addr(w)(corePAddrBits-1,3))) val mask_union = lcam_mask(w) & write_mask addr_matches(w)(i) := (mask_union =/= 0.U) && dword_addr_matches forward_matches(w)(i) := addr_matches(w)(i) && s_data.valid && (mask_union === lcam_mask(w)) prs2_matches(w)(i) := (s_uop.prs2 === lcam_uop(w).pdst && s_uop.lrs2_rtype === RT_FIX && enableLoadToStoreForwarding.B) } } val fast_stq_valids = stq_valid.asUInt for (w <- 0 until lsuWidth) { ldst_addr_matches(w) := (addr_matches(w).asUInt & lcam_st_dep_mask(w)) & fast_stq_valids ldst_forward_matches(w) := (forward_matches(w).asUInt & lcam_st_dep_mask(w)) & fast_stq_valids stld_prs2_matches(w) := (prs2_matches(w).asUInt & ~lcam_st_dep_mask(w)) & fast_stq_valids } val stq_amos = VecInit(stq_uop.map(u => u.is_fence || u.is_amo)) for (w <- 0 until lsuWidth) { val has_older_amo = (stq_amos.asUInt & lcam_st_dep_mask(w)) =/= 0.U when (do_ld_search(w) && (has_older_amo || (ldst_addr_matches(w) =/= 0.U))) { when (RegNext(dmem_req_fire(w) && !s0_kills(w)) && !fired_load_agen(w)) { io.dmem.s1_kill(w) := true.B } s1_set_execute(lcam_ldq_idx(w)) := false.B when (has_older_amo) { kill_forward(w) := true.B } } } // Set execute bit in LDQ for (i <- 0 until numLdqEntries) { when (s1_set_execute(i)) { ldq_executed(i) := true.B } } // Find the youngest store which the load is dependent on for (w <- 0 until lsuWidth) { val (youngest_matching, match_found) = ForwardingAgeLogic(numStqEntries, ldst_addr_matches(w), lcam_uop(w).stq_idx) val (youngest_forwarder, forwarder_found) = ForwardingAgeLogic(numStqEntries, ldst_forward_matches(w), lcam_uop(w).stq_idx) wb_ldst_forward_stq_idx(w) := youngest_forwarder // Forward if st-ld forwarding is possible from the writemask and loadmask wb_ldst_forward_valid(w) := (youngest_matching === youngest_forwarder && match_found && forwarder_found && RegNext(can_forward(w) && !kill_forward(w) && do_ld_search(w)) && !RegNext(IsKilledByBranch(io.core.brupdate, io.core.exception, lcam_uop(w)))) } // Avoid deadlock with a 1-w LSU prioritizing load wakeups > store commits // On a 2W machine, load wakeups and store commits occupy separate pipelines, // so only add this logic for 1-w LSU if (lsuWidth == 1) { // Wakeups may repeatedly find a st->ld addr conflict and fail to forward, // repeated wakeups may block the store from ever committing // Disallow load wakeups 1 cycle after this happens to allow the stores to drain when (RegNext(ldst_addr_matches(0) =/= 0.U) && !wb_ldst_forward_valid(0)) { 
      block_load_wakeup := true.B
    }

    // If stores remain blocked for 15 cycles, block load wakeups to get a store through
    val store_blocked_counter = Reg(UInt(4.W))
    when (will_fire_store_commit_fast(0) || will_fire_store_commit_slow(0) || !can_fire_store_commit_slow(0)) {
      store_blocked_counter := 0.U
    } .elsewhen (can_fire_store_commit_slow(0) && !(will_fire_store_commit_slow(0) || will_fire_store_commit_fast(0))) {
      store_blocked_counter := Mux(store_blocked_counter === 15.U, 15.U, store_blocked_counter + 1.U)
    }
    when (store_blocked_counter === 15.U) {
      block_load_wakeup := true.B
    }
  }

  // Forward this load to the youngest matching store
  val mem_stld_forward_stq_idx = widthMap(w => AgePriorityEncoder(stld_prs2_matches(w).asBools, lcam_uop(w).stq_idx))
  val mem_stld_forward_valid   = widthMap(w => do_ld_search(w) && stld_prs2_matches(w)(mem_stld_forward_stq_idx(w)))

  // Task 3: Clr unsafe bit in ROB for successful translations
  //         Delay this a cycle to avoid going ahead of the exception broadcast
  //         The unsafe bit is cleared on the first translation, so no need to fire for load wakeups
  for (w <- 0 until lsuWidth) {
    io.core.clr_unsafe(w).valid := (
      RegNext(do_st_search(w)) ||
      (!io.dmem.nack(w).valid &&
        RegNext(do_ld_search(w) && !fired_load_agen(w) && !io.dmem.s1_kill(w) && RegNext(dmem_req_fire(w))))
    ) && !RegNext(failed_load)
    io.core.clr_unsafe(w).bits := RegNext(lcam_uop(w).rob_idx)
  }

  // detect which loads get marked as failures, but broadcast to the ROB the oldest failing load
  // TODO encapsulate this in an age-based priority-encoder
  val l_idx = AgePriorityEncoder((0 until numLdqEntries).map { i => ldq_valid(i) && ldq_order_fail(i) }, ldq_head)

  // one exception port, but multiple causes!
  // - 1) the incoming store-address finds a faulting load (it is by definition younger)
  // - 2) the incoming load or store address is excepting. It must be older and thus takes precedence.
  val r_xcpt_valid = RegInit(false.B)
  val r_xcpt       = Reg(new Exception)

  val ld_xcpt_valid = (ldq_order_fail.asUInt & ldq_valid.asUInt) =/= 0.U
  val ld_xcpt_uop   = WireInit(ldq_uop(Mux(l_idx >= numLdqEntries.U, l_idx - numLdqEntries.U, l_idx)))

  val use_mem_xcpt = (mem_xcpt_valid && IsOlder(mem_xcpt_uop.rob_idx, ld_xcpt_uop.rob_idx, io.core.rob_head_idx)) || !ld_xcpt_valid

  val xcpt_uop = Mux(use_mem_xcpt, mem_xcpt_uop, ld_xcpt_uop)

  r_xcpt_valid := (ld_xcpt_valid || mem_xcpt_valid) &&
                  !IsKilledByBranch(io.core.brupdate, io.core.exception, xcpt_uop)
  r_xcpt.uop         := xcpt_uop
  r_xcpt.uop.br_mask := GetNewBrMask(io.core.brupdate, xcpt_uop)
  r_xcpt.cause       := Mux(use_mem_xcpt, mem_xcpt_cause, MINI_EXCEPTION_MEM_ORDERING)
  r_xcpt.badvaddr    := mem_xcpt_vaddr // TODO is there another register we can use instead?
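  // Annotation (illustrative sketch, not part of the BOOM source): the IsOlder calls used
  // above (for picking the oldest exception) and throughout the LCAM search compare
  // circular queue indices relative to a head pointer. A plain-Scala model of that
  // comparison, with a made-up name, purely for intuition:
  def isOlderModel(a: Int, b: Int, head: Int, entries: Int): Boolean = {
    // distance each index has travelled past the head, wrapping modulo the queue size;
    // the entry closer to the head was allocated earlier, i.e. it is older
    val da = (a - head + entries) % entries
    val db = (b - head + entries) % entries
    da < db
  }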
io.core.lxcpt.valid := r_xcpt_valid && !IsKilledByBranch(io.core.brupdate, io.core.exception, r_xcpt.uop) io.core.lxcpt.bits := r_xcpt // Task 4: Speculatively wakeup loads 1 cycle before they come back // if loadUseDelay == 5 for (w <- 0 until lsuWidth) { if (!enableFastLoadUse) { wakeupArbs(w).io.in(1).valid := (fired_load_agen_exec(w) && RegNext(dmem_req_fire(w)) && !io.dmem.s1_nack_advisory(w) && !mem_incoming_uop(w).fp_val) wakeupArbs(w).io.in(1).bits.uop := mem_incoming_uop(w) wakeupArbs(w).io.in(1).bits.bypassable := true.B wakeupArbs(w).io.in(1).bits.speculative_mask := 0.U wakeupArbs(w).io.in(1).bits.rebusy := false.B } } //------------------------------------------------------------- //------------------------------------------------------------- // Writeback Cycle (St->Ld Forwarding Path) //------------------------------------------------------------- //------------------------------------------------------------- // Handle Memory Responses and nacks //---------------------------------- val iresp = Wire(Vec(lsuWidth, Valid(new ExeUnitResp(xLen)))) val fresp = Wire(Vec(lsuWidth, Valid(new ExeUnitResp(xLen)))) for (w <- 0 until lsuWidth) { iresp(w).valid := false.B iresp(w).bits := DontCare fresp(w).valid := false.B fresp(w).bits := DontCare io.core.iresp(w) := (if (enableFastLoadUse) iresp(w) else RegNext( UpdateBrMask(io.core.brupdate, io.core.exception, iresp(w)))) io.core.fresp(w) := fresp(w) } // Initially assume the speculative load wakeup failed val wb_spec_wakeups = Wire(Vec(lsuWidth, Valid(new Wakeup))) val spec_wakeups = Wire(Vec(lsuWidth, Valid(new Wakeup))) val wb_slow_wakeups = Wire(Vec(lsuWidth, Valid(new Wakeup))) val slow_wakeups = Wire(Vec(lsuWidth, Valid(new Wakeup))) val dmem_resp_fired = WireInit(widthMap(w => false.B)) for (w <- 0 until lsuWidth) { val w1 = Reg(Valid(new Wakeup)) w1.valid := wakeupArbs(w).io.in(1).fire && !IsKilledByBranch(io.core.brupdate, io.core.exception, wakeupArbs(w).io.in(1).bits) w1.bits := UpdateBrMask(io.core.brupdate, wakeupArbs(w).io.in(1).bits) val w2 = Reg(Valid(new Wakeup)) w2.valid := w1.valid && !IsKilledByBranch(io.core.brupdate, io.core.exception, w1.bits) w2.bits := UpdateBrMask(io.core.brupdate, w1.bits) spec_wakeups(w).valid := w2.valid spec_wakeups(w).bits := w2.bits wb_spec_wakeups(w) := (if (enableFastLoadUse) w2 else w1) } for (w <- 0 until lsuWidth) { wb_slow_wakeups(w).valid := false.B wb_slow_wakeups(w).bits := DontCare // Handle nacks when (io.dmem.nack(w).valid) { when (io.dmem.nack(w).bits.is_hella) { assert(hella_state === h_wait || hella_state === h_dead) } .elsewhen (io.dmem.nack(w).bits.uop.uses_ldq) { assert(ldq_executed(io.dmem.nack(w).bits.uop.ldq_idx)) ldq_executed(io.dmem.nack(w).bits.uop.ldq_idx) := false.B } .otherwise { assert(io.dmem.nack(w).bits.uop.uses_stq) when (IsOlder(io.dmem.nack(w).bits.uop.stq_idx, stq_execute_head, stq_head)) { stq_execute_queue_flush := true.B stq_execute_head := io.dmem.nack(w).bits.uop.stq_idx } } } // Handle the response val resp = Mux(!io.dmem.resp(w).valid && (w == lsuWidth-1).B, io.dmem.ll_resp.bits, io.dmem.resp(w).bits) if (w == lsuWidth-1) { io.dmem.ll_resp.ready := !io.dmem.resp(w).valid && !wb_spec_wakeups(w).valid } when (io.dmem.resp(w).valid || ((w == lsuWidth-1).B && io.dmem.ll_resp.fire)) { when (resp.uop.uses_ldq) { assert(!resp.is_hella) val ldq_idx = resp.uop.ldq_idx val uop = WireInit(ldq_uop(ldq_idx)) val send_iresp = uop.dst_rtype === RT_FIX val send_fresp = uop.dst_rtype === RT_FLT iresp(w).bits.uop := uop fresp(w).bits.uop := uop iresp(w).valid := 
send_iresp iresp(w).bits.data := resp.data fresp(w).valid := send_fresp fresp(w).bits.data := resp.data assert(send_iresp ^ send_fresp) dmem_resp_fired(w) := true.B ldq_will_succeed (ldq_idx) := iresp(w).valid || fresp(w).valid ldq_debug_wb_data(ldq_idx) := resp.data wb_slow_wakeups(w).valid := send_iresp wb_slow_wakeups(w).bits.uop := uop wb_slow_wakeups(w).bits.speculative_mask := 0.U wb_slow_wakeups(w).bits.rebusy := false.B wb_slow_wakeups(w).bits.bypassable := false.B } when (resp.uop.uses_stq) { val uop = stq_uop(resp.uop.stq_idx) assert(!resp.is_hella && resp.uop.is_amo) dmem_resp_fired(w) := true.B iresp(w).valid := true.B iresp(w).bits.uop := uop iresp(w).bits.data := resp.data wb_slow_wakeups(w).valid := true.B wb_slow_wakeups(w).bits.uop := uop wb_slow_wakeups(w).bits.speculative_mask := 0.U wb_slow_wakeups(w).bits.rebusy := false.B wb_slow_wakeups(w).bits.bypassable := false.B stq_debug_wb_data(resp.uop.stq_idx) := resp.data } } // Handle store acks when (io.dmem.store_ack(w).valid) { stq_succeeded(io.dmem.store_ack(w).bits.uop.stq_idx) := true.B } when (dmem_resp_fired(w) && wb_ldst_forward_valid(w)) { // Twiddle thumbs. Can't forward because dcache response takes precedence } .elsewhen (!dmem_resp_fired(w) && wb_ldst_forward_valid(w)) { val f_idx = wb_ldst_forward_ldq_idx(w) val forward_uop = wb_ldst_forward_e(w).uop //val stq_e = WireInit(stq(wb_ldst_forward_stq_idx(w))) val s_idx = wb_ldst_forward_stq_idx(w) val storegen = new freechips.rocketchip.rocket.StoreGen( stq_uop(s_idx).mem_size, stq_addr(s_idx).bits, stq_data(s_idx).bits, coreDataBytes) val loadgen = new freechips.rocketchip.rocket.LoadGen( forward_uop.mem_size, forward_uop.mem_signed, wb_ldst_forward_ld_addr(w), storegen.data, false.B, coreDataBytes) wb_slow_wakeups(w).valid := forward_uop.dst_rtype === RT_FIX wb_slow_wakeups(w).bits.uop := forward_uop wb_slow_wakeups(w).bits.speculative_mask := 0.U wb_slow_wakeups(w).bits.rebusy := false.B wb_slow_wakeups(w).bits.bypassable := false.B iresp(w).valid := (forward_uop.dst_rtype === RT_FIX) fresp(w).valid := (forward_uop.dst_rtype === RT_FLT) iresp(w).bits.uop := forward_uop fresp(w).bits.uop := forward_uop iresp(w).bits.data := loadgen.data fresp(w).bits.data := loadgen.data ldq_will_succeed (f_idx) := true.B ldq_forward_std_val(f_idx) := true.B ldq_forward_stq_idx(f_idx) := wb_ldst_forward_stq_idx(w) ldq_debug_wb_data (f_idx) := loadgen.data } // Forward loads to store-data if (enableStLdForwarding) { when (iresp(w).valid && iresp(w).bits.uop.uses_ldq && iresp(w).bits.uop.ldq_idx === RegNext(lcam_ldq_idx(w)) && RegNext(mem_stld_forward_valid(w))) { assert(!stq_data(RegNext(mem_stld_forward_stq_idx(w))).valid) stq_data(RegNext(mem_stld_forward_stq_idx(w))).valid := true.B stq_data(RegNext(mem_stld_forward_stq_idx(w))).bits := iresp(w).bits.data } } // Do wakeups wakeupArbs(w).io.in(0).valid := false.B wakeupArbs(w).io.in(0).bits := DontCare slow_wakeups(w) := (if (enableFastLoadUse) wb_slow_wakeups(w) else RegNext(UpdateBrMask( io.core.brupdate, io.core.exception, wb_slow_wakeups(w)))) when (slow_wakeups(w).valid) { when (spec_wakeups(w).valid) { // Do nothing, since the slow wakeup matches a earlier speculative wakeup assert(slow_wakeups(w).bits.uop.ldq_idx === spec_wakeups(w).bits.uop.ldq_idx) } .otherwise { // This wasn't speculatively woken up, so end the late slow wakeup wakeupArbs(w).io.in(0).valid := slow_wakeups(w).valid wakeupArbs(w).io.in(0).bits := slow_wakeups(w).bits } } .otherwise { when (spec_wakeups(w).valid) { // This sent a speculative wakeup, now 
we need to undo it wakeupArbs(w).io.in(0).valid := spec_wakeups(w).valid wakeupArbs(w).io.in(0).bits := spec_wakeups(w).bits wakeupArbs(w).io.in(0).bits.rebusy := true.B } } } //------------------------------------------------------------- // Kill speculated entries on branch mispredict //------------------------------------------------------------- //------------------------------------------------------------- // Kill stores for (i <- 0 until numStqEntries) { when (stq_valid(i)) { val uop = WireInit(stq_uop(i)) stq_uop(i).br_mask := GetNewBrMask(io.core.brupdate, uop.br_mask) when (IsKilledByBranch(io.core.brupdate, false.B, uop)) { stq_valid(i) := false.B stq_addr (i).valid := false.B stq_data (i).valid := false.B } } assert (!(IsKilledByBranch(io.core.brupdate, false.B, stq_uop(i)) && stq_valid(i) && stq_committed(i)), "Branch is trying to clear a committed store.") } // Kill loads for (i <- 0 until numLdqEntries) { when (ldq_valid(i)) { val uop = WireInit(ldq_uop(i)) ldq_uop(i).br_mask := GetNewBrMask(io.core.brupdate, uop.br_mask) when (IsKilledByBranch(io.core.brupdate, io.core.exception, uop)) { ldq_valid(i) := false.B ldq_addr (i).valid := false.B } } } //------------------------------------------------------------- when (io.core.brupdate.b2.mispredict && !io.core.exception) { stq_tail := io.core.brupdate.b2.uop.stq_idx ldq_tail := io.core.brupdate.b2.uop.ldq_idx } //------------------------------------------------------------- //------------------------------------------------------------- // dequeue old entries on commit //------------------------------------------------------------- //------------------------------------------------------------- var temp_stq_commit_head = stq_commit_head var temp_ldq_head = ldq_head for (w <- 0 until coreWidth) { val commit_store = io.core.commit.valids(w) && io.core.commit.uops(w).uses_stq val commit_load = io.core.commit.valids(w) && io.core.commit.uops(w).uses_ldq // val stq_e = WireInit(stq(temp_stq_commit_head)) // val ldq_e = WireInit(ldq(temp_ldq_head)) val l_uop = WireInit(ldq_uop(temp_ldq_head)) val s_uop = WireInit(stq_uop(temp_stq_commit_head)) when (commit_store) { stq_committed (temp_stq_commit_head) := true.B stq_can_execute(temp_stq_commit_head) := true.B } .elsewhen (commit_load) { assert (ldq_valid(temp_ldq_head), "[lsu] trying to commit an un-allocated load entry.") assert ((ldq_executed(temp_ldq_head) || ldq_forward_std_val(temp_ldq_head)) && ldq_succeeded(temp_ldq_head), "[lsu] trying to commit an un-executed load entry.") ldq_valid(temp_ldq_head) := false.B } if (MEMTRACE_PRINTF) { when (commit_store || commit_load) { val uop = Mux(commit_store, s_uop, l_uop) val addr = Mux(commit_store, stq_addr(temp_stq_commit_head).bits , ldq_addr(temp_ldq_head).bits) val stdata = Mux(commit_store, stq_data(temp_stq_commit_head).bits , 0.U) val wbdata = Mux(commit_store, stq_debug_wb_data(temp_stq_commit_head), ldq_debug_wb_data(temp_ldq_head)) printf("MT %x %x %x %x %x %x %x\n", io.core.tsc_reg, 0.U, uop.mem_cmd, uop.mem_size, addr, stdata, wbdata) } } temp_stq_commit_head = Mux(commit_store, WrapInc(temp_stq_commit_head, numStqEntries), temp_stq_commit_head) temp_ldq_head = Mux(commit_load, WrapInc(temp_ldq_head, numLdqEntries), temp_ldq_head) } stq_commit_head := temp_stq_commit_head ldq_head := temp_ldq_head // store has been committed AND successfully sent data to memory val stq_head_is_fence = stq_uop(stq_head).is_fence when (stq_valid(stq_head) && stq_committed(stq_head)) { when (stq_head_is_fence && !io.dmem.ordered) { 
io.dmem.force_order := true.B store_needs_order := true.B } clear_store := Mux(stq_head_is_fence, io.dmem.ordered, stq_succeeded(stq_head)) } when (clear_store) { stq_valid(stq_head) := false.B stq_head := WrapInc(stq_head, numStqEntries) when (stq_head_is_fence) { stq_execute_head := WrapInc(stq_execute_head, numStqEntries) } } // ----------------------- // Hellacache interface // We need to time things like a HellaCache would io.hellacache.req.ready := false.B io.hellacache.s2_nack := false.B io.hellacache.s2_nack_cause_raw := false.B io.hellacache.s2_uncached := DontCare io.hellacache.s2_paddr := DontCare io.hellacache.s2_xcpt := (0.U).asTypeOf(new rocket.HellaCacheExceptions) io.hellacache.replay_next := false.B io.hellacache.s2_gpa := DontCare io.hellacache.s2_gpa_is_pte := DontCare io.hellacache.ordered := DontCare io.hellacache.perf := DontCare io.hellacache.clock_enabled := true.B io.hellacache.store_pending := stq_valid.reduce(_||_) io.hellacache.resp.valid := false.B io.hellacache.resp.bits.addr := hella_req.addr io.hellacache.resp.bits.tag := hella_req.tag io.hellacache.resp.bits.cmd := hella_req.cmd io.hellacache.resp.bits.signed := hella_req.signed io.hellacache.resp.bits.size := hella_req.size io.hellacache.resp.bits.mask := hella_req.mask io.hellacache.resp.bits.replay := false.B io.hellacache.resp.bits.has_data := true.B io.hellacache.resp.bits.data_word_bypass := io.dmem.ll_resp.bits.data io.hellacache.resp.bits.data_raw := io.dmem.ll_resp.bits.data io.hellacache.resp.bits.store_data := hella_req.data io.hellacache.resp.bits.dprv := io.ptw.status.prv io.hellacache.resp.bits.dv := io.ptw.status.v io.hellacache.resp.bits.data := io.dmem.ll_resp.bits.data when (hella_state === h_ready) { io.hellacache.req.ready := true.B when (io.hellacache.req.fire) { hella_req := io.hellacache.req.bits hella_state := h_s1 } } .elsewhen (hella_state === h_s1) { can_fire_hella_incoming(0) := true.B hella_data := io.hellacache.s1_data hella_xcpt := dtlb.io.resp(0) when (io.hellacache.s1_kill) { when (will_fire_hella_incoming(0) && dmem_req_fire(0)) { hella_state := h_dead } .otherwise { hella_state := h_ready } } .elsewhen (will_fire_hella_incoming(0) && dmem_req_fire(0)) { hella_state := h_s2 } .otherwise { hella_state := h_s2_nack } } .elsewhen (hella_state === h_s2_nack) { io.hellacache.s2_nack := true.B hella_state := h_ready } .elsewhen (hella_state === h_s2) { io.hellacache.s2_xcpt := hella_xcpt when (io.hellacache.s2_kill || hella_xcpt.asUInt =/= 0.U) { hella_state := h_dead } .otherwise { hella_state := h_wait } } .elsewhen (hella_state === h_wait) { when (io.dmem.ll_resp.fire && io.dmem.ll_resp.bits.is_hella) { hella_state := h_ready io.hellacache.resp.valid := true.B io.hellacache.resp.bits.addr := hella_req.addr io.hellacache.resp.bits.tag := hella_req.tag io.hellacache.resp.bits.cmd := hella_req.cmd io.hellacache.resp.bits.signed := hella_req.signed io.hellacache.resp.bits.size := hella_req.size io.hellacache.resp.bits.data := io.dmem.ll_resp.bits.data } for (w <- 0 until lsuWidth) { when ((io.dmem.resp(w).valid && io.dmem.resp(w).bits.is_hella) || (io.dmem.store_ack(w).valid && io.dmem.store_ack(w).bits.is_hella)) { hella_state := h_ready io.hellacache.resp.valid := true.B io.hellacache.resp.bits.addr := hella_req.addr io.hellacache.resp.bits.tag := hella_req.tag io.hellacache.resp.bits.cmd := hella_req.cmd io.hellacache.resp.bits.signed := hella_req.signed io.hellacache.resp.bits.size := hella_req.size io.hellacache.resp.bits.data := io.dmem.resp(w).bits.data } when 
(io.dmem.nack(w).valid && io.dmem.nack(w).bits.is_hella) { hella_state := h_replay } } } .elsewhen (hella_state === h_replay) { can_fire_hella_wakeup(0) := true.B when (will_fire_hella_wakeup(0) && dmem_req_fire(0)) { hella_state := h_wait } } .elsewhen (hella_state === h_dead) { when (io.dmem.ll_resp.fire && io.dmem.ll_resp.bits.is_hella) { hella_state := h_ready } for (w <- 0 until lsuWidth) { when (io.dmem.resp(w).valid && io.dmem.resp(w).bits.is_hella) { hella_state := h_ready } } } //------------------------------------------------------------- // Exception / Reset when (reset.asBool || io.core.exception) { ldq_head := 0.U ldq_tail := 0.U when (reset.asBool) { stq_head := 0.U stq_tail := 0.U stq_commit_head := 0.U stq_execute_head := 0.U for (i <- 0 until numStqEntries) { stq_valid(i) := false.B } } .otherwise // exception { stq_tail := stq_commit_head for (i <- 0 until numStqEntries) { when (!stq_committed(i) && !stq_succeeded(i)) { stq_valid(i) := false.B } } } for (i <- 0 until numLdqEntries) { ldq_valid(i) := false.B } } } /** * Object to take an address and generate an 8-bit mask of which bytes within a * double-word. */ object GenByteMask { def apply(addr: UInt, size: UInt): UInt = { val mask = Wire(UInt(8.W)) mask := MuxCase(255.U(8.W), Array( (size === 0.U) -> (1.U(8.W) << addr(2,0)), (size === 1.U) -> (3.U(8.W) << (addr(2,1) << 1.U)), (size === 2.U) -> Mux(addr(2), 240.U(8.W), 15.U(8.W)), (size === 3.U) -> 255.U(8.W))) mask } } object ForwardingAgeLogic { def apply(n: Int, matches: UInt, youngest: UInt): (UInt, Bool) = { val logic = Module(new ForwardingAgeLogic(n)) logic.io.matches := matches logic.io.youngest := youngest (logic.io.found_idx, logic.io.found) } } class ForwardingAgeLogic(n: Int) extends Module() { val io = IO(new Bundle { val matches = Input(UInt(n.W)) // bit vector of addresses that match // between the load and the SAQ val youngest = Input(UInt(log2Ceil(n).W)) // needed to get "age" val found = Output(Bool()) val found_idx = Output(UInt(log2Ceil(n).W)) }) // generating mask that zeroes out anything younger than tail val age_mask = Wire(Vec(n, Bool())) for (i <- 0 until n) { age_mask(i) := true.B when (i.U >= io.youngest) // currently the tail points PAST last store, so use >= { age_mask(i) := false.B } } // Priority encoder with moving tail: double length val matches = Wire(UInt((2*n).W)) matches := Cat(io.matches & age_mask.asUInt, io.matches) val found_match = Reg(Bool()) val found_idx = Reg(UInt(log2Ceil(n).W)) found_match := false.B found_idx := 0.U io.found_idx := found_idx io.found := found_match // look for youngest, approach from the oldest side, let the last one found stick for (i <- 0 until (2*n)) { when (matches(i)) { found_match := true.B found_idx := (i % n).U } } }
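For reference, the GenByteMask object above can be sanity-checked against a small software model. The sketch below is plain Scala rather than Chisel and is illustrative only; the name ByteMaskModel is invented for this sketch, and it assumes the 8-byte (64-bit) data path implied by the MuxCase arms above.

object ByteMaskModel {
  // Illustrative model of GenByteMask for an 8-byte data path.
  // `size` is log2 of the number of bytes accessed; only addr(2,0) is used.
  def apply(addr: Int, size: Int): Int = size match {
    case 0 => 1 << (addr & 0x7)                     // byte: one-hot at addr(2,0)
    case 1 => 3 << (addr & 0x6)                     // halfword: byte pair selected by addr(2,1)
    case 2 => if ((addr & 0x4) != 0) 0xF0 else 0x0F // word: upper or lower half
    case _ => 0xFF                                  // doubleword: all eight bytes
  }
}
// e.g. ByteMaskModel(0x3, 0) == 0x08, ByteMaskModel(0x6, 1) == 0xC0, ByteMaskModel(0x4, 2) == 0xF0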
module ForwardingAgeLogic_3( // @[lsu.scala:1972:7]
  input clock, // @[lsu.scala:1972:7]
  input reset, // @[lsu.scala:1972:7]
  input [15:0] io_matches, // @[lsu.scala:1974:14]
  input [3:0] io_youngest, // @[lsu.scala:1974:14]
  output io_found, // @[lsu.scala:1974:14]
  output [3:0] io_found_idx // @[lsu.scala:1974:14]
);
  wire [15:0] io_matches_0 = io_matches; // @[lsu.scala:1972:7]
  wire [3:0] io_youngest_0 = io_youngest; // @[lsu.scala:1972:7]
  wire age_mask_15 = 1'h0; // @[lsu.scala:1985:22]
  wire io_found_0; // @[lsu.scala:1972:7]
  wire [3:0] io_found_idx_0; // @[lsu.scala:1972:7]
  wire age_mask_0; // @[lsu.scala:1985:22]
  wire age_mask_1; // @[lsu.scala:1985:22]
  wire age_mask_2; // @[lsu.scala:1985:22]
  wire age_mask_3; // @[lsu.scala:1985:22]
  wire age_mask_4; // @[lsu.scala:1985:22]
  wire age_mask_5; // @[lsu.scala:1985:22]
  wire age_mask_6; // @[lsu.scala:1985:22]
  wire age_mask_7; // @[lsu.scala:1985:22]
  wire age_mask_8; // @[lsu.scala:1985:22]
  wire age_mask_9; // @[lsu.scala:1985:22]
  wire age_mask_10; // @[lsu.scala:1985:22]
  wire age_mask_11; // @[lsu.scala:1985:22]
  wire age_mask_12; // @[lsu.scala:1985:22]
  wire age_mask_13; // @[lsu.scala:1985:22]
  wire age_mask_14; // @[lsu.scala:1985:22]
  assign age_mask_0 = |io_youngest_0; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_1 = |(io_youngest_0[3:1]); // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_2 = io_youngest_0 > 4'h2; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_3 = |(io_youngest_0[3:2]); // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_4 = io_youngest_0 > 4'h4; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_5 = io_youngest_0 > 4'h5; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_6 = io_youngest_0 > 4'h6; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_7 = io_youngest_0[3]; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_8 = io_youngest_0 > 4'h8; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_9 = io_youngest_0 > 4'h9; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_10 = io_youngest_0 > 4'hA; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_11 = io_youngest_0 > 4'hB; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_12 = io_youngest_0 > 4'hC; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_13 = io_youngest_0 > 4'hD; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  assign age_mask_14 = &io_youngest_0; // @[lsu.scala:1972:7, :1985:22, :1988:17, :1989:15, :1990:5, :1991:19]
  wire [31:0] _matches_T_2; // @[lsu.scala:1997:17]
  wire [31:0] matches_0; // @[lsu.scala:1996:21]
  wire [1:0] matches_lo_lo_lo = {age_mask_1, age_mask_0}; // @[lsu.scala:1985:22, :1997:40]
  wire [1:0] matches_lo_lo_hi = {age_mask_3, age_mask_2}; // @[lsu.scala:1985:22, :1997:40]
  wire [3:0] matches_lo_lo = {matches_lo_lo_hi, matches_lo_lo_lo}; // @[lsu.scala:1997:40]
  wire [1:0] matches_lo_hi_lo = {age_mask_5, age_mask_4}; // @[lsu.scala:1985:22, :1997:40]
  wire [1:0] matches_lo_hi_hi = {age_mask_7, age_mask_6}; // @[lsu.scala:1985:22, :1997:40]
  wire [3:0] matches_lo_hi = {matches_lo_hi_hi, matches_lo_hi_lo}; // @[lsu.scala:1997:40]
  wire [7:0] matches_lo = {matches_lo_hi, matches_lo_lo}; // @[lsu.scala:1997:40]
  wire [1:0] matches_hi_lo_lo = {age_mask_9, age_mask_8}; // @[lsu.scala:1985:22, :1997:40]
  wire [1:0] matches_hi_lo_hi = {age_mask_11, age_mask_10}; // @[lsu.scala:1985:22, :1997:40]
  wire [3:0] matches_hi_lo = {matches_hi_lo_hi, matches_hi_lo_lo}; // @[lsu.scala:1997:40]
  wire [1:0] matches_hi_hi_lo = {age_mask_13, age_mask_12}; // @[lsu.scala:1985:22, :1997:40]
  wire [1:0] matches_hi_hi_hi = {1'h0, age_mask_14}; // @[lsu.scala:1985:22, :1997:40]
  wire [3:0] matches_hi_hi = {matches_hi_hi_hi, matches_hi_hi_lo}; // @[lsu.scala:1997:40]
  wire [7:0] matches_hi = {matches_hi_hi, matches_hi_lo}; // @[lsu.scala:1997:40]
  wire [15:0] _matches_T = {matches_hi, matches_lo}; // @[lsu.scala:1997:40]
  wire [15:0] _matches_T_1 = io_matches_0 & _matches_T; // @[lsu.scala:1972:7, :1997:{29,40}]
  assign _matches_T_2 = {_matches_T_1, io_matches_0}; // @[lsu.scala:1972:7, :1997:{17,29}]
  assign matches_0 = _matches_T_2; // @[lsu.scala:1996:21, :1997:17]
  reg found_match; // @[lsu.scala:2000:24]
  assign io_found_0 = found_match; // @[lsu.scala:1972:7, :2000:24]
  reg [3:0] found_idx; // @[lsu.scala:2001:22]
  assign io_found_idx_0 = found_idx; // @[lsu.scala:1972:7, :2001:22]
  always @(posedge clock) begin // @[lsu.scala:1972:7]
    found_match <= |matches_0; // @[lsu.scala:1996:21, :2000:24, :2012:7, :2013:22]
    found_idx <= matches_0[31] ? 4'hF : matches_0[30] ? 4'hE : matches_0[29] ? 4'hD : matches_0[28] ? 4'hC : matches_0[27] ? 4'hB : matches_0[26] ? 4'hA : matches_0[25] ? 4'h9 : matches_0[24] ? 4'h8 : matches_0[23] ? 4'h7 : matches_0[22] ? 4'h6 : matches_0[21] ? 4'h5 : matches_0[20] ? 4'h4 : matches_0[19] ? 4'h3 : matches_0[18] ? 4'h2 : matches_0[17] ? 4'h1 : matches_0[16] ? 4'h0 : matches_0[15] ? 4'hF : matches_0[14] ? 4'hE : matches_0[13] ? 4'hD : matches_0[12] ? 4'hC : matches_0[11] ? 4'hB : matches_0[10] ? 4'hA : matches_0[9] ? 4'h9 : matches_0[8] ? 4'h8 : matches_0[7] ? 4'h7 : matches_0[6] ? 4'h6 : matches_0[5] ? 4'h5 : matches_0[4] ? 4'h4 : matches_0[3] ? 4'h3 : matches_0[2] ? 4'h2 : {3'h0, matches_0[1]}; // @[lsu.scala:1988:17, :1989:15, :1990:5, :1991:19, :1996:21, :2001:22, :2011:20, :2012:7, :2014:20]
  end // always @(posedge)
  assign io_found = io_found_0; // @[lsu.scala:1972:7]
  assign io_found_idx = io_found_idx_0; // @[lsu.scala:1972:7]
endmodule
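The search that ForwardingAgeLogic performs (in both the Chisel and the emitted module above) amounts to: mask off stores at or beyond youngest, then let the highest set bit of the doubled {masked, raw} vector win, so the youngest older store is preferred and the scan wraps to the youngest match overall when no older match exists. The plain-Scala model below is illustrative only; the name ForwardingAgeModel is invented here, and it ignores the one-cycle register delay on found/found_idx.

object ForwardingAgeModel {
  // Software model of ForwardingAgeLogic's wrap-around priority search.
  // Returns the index of the youngest matching store older than `youngest`,
  // falling back to the youngest match overall; None if nothing matches.
  def apply(n: Int, matches: Seq[Boolean], youngest: Int): Option[Int] = {
    val olderOnly = matches.zipWithIndex.map { case (m, i) => m && i < youngest }
    // Low half: all matches; high half: only matches older than `youngest`.
    // The last set bit wins, mirroring the last-assignment-wins loop in the Chisel.
    val doubled = matches ++ olderOnly
    val hits    = doubled.zipWithIndex.collect { case (true, i) => i % n }
    hits.lastOption
  }
}
// e.g. ForwardingAgeModel(4, Seq(true, false, true, false), youngest = 2) == Some(0)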
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
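  // The inflight, inflight_opcodes, and inflight_sizes registers are parallel
  // per-source scoreboards. Each source ID owns a (3+1)-bit lane of
  // inflight_opcodes and a (sizeBits+1)-bit lane of inflight_sizes; the stored
  // value is (opcode << 1) | 1 or (size << 1) | 1, so an all-zero lane means
  // no request is outstanding, and a_opcode_lookup / a_size_lookup shift right
  // by one to drop that flag bit (e.g. with a_opcode_bus_size = 4, a Get
  // (opcode 4) outstanding from source 2 makes lane bits [11:8] equal 4'b1001).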
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
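// The fold below builds a log-depth barrel rotator: stage i rotates by 2^i only when bit i of the
// (zero-padded) amount is set, so a power-of-two Seq of 8 elements needs just 3 mux stages.
// Rough walk-through for a hypothetical v = Seq(a, b, c, d) and n = 3.U:
//   stage 0 (amt(0) = 1): rotate by 1 -> (b, c, d, a)
//   stage 1 (amt(1) = 1): rotate by 2 -> (d, a, b, c)  == v.rotate(3), as expected
// Typical use, via this implicit class:  val picked = Seq(a, b, c, d).rotate(sel)
// The UInt rotateLeft/rotateRight(n: UInt) helpers further down use the same trick.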
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
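// These UInt helpers compose with ordinary Chisel code once freechips.rocketchip.util._ is
// imported.  A small usage sketch for a hypothetical 8-bit signal x:
//   val wide   = x.padTo(16)        // zero-extend to 16 bits
//   val signed = x.sextTo(16)       // sign-extend to 16 bits
//   val nib    = x.extract(3, 0)    // like x(3, 0), but a zero-width extract yields 0.U
//   val spun   = x.rotateRight(3)   // static rotate; the dynamic form here muxes in
//                                   // log2(width) stages, one per bit of n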
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask selects the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
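// Worked example: for AddressSet(0x1000, 0xf0f), contains(x) is ((x ^ 0x1000) & ~0xf0f) == 0, so
// bits [7:4] must be zero and, of the bits above 11, only bit 12 may be set, while bits [3:0] and
// [11:8] are "don't care" -- exactly the sixteen disjoint 16-byte windows listed above.
//   alignment  = ((0xf0f + 1) & ~0xf0f)  = 0x10   // each window is 16 bytes
//   contiguous = (alignment == 0xf0f + 1) = false // the windows do not form one range
// A quick elaboration-time sanity check of the same thing (values chosen for illustration):
//   require( AddressSet(0x1000, 0xf0f).contains(BigInt(0x1208)))
//   require(!AddressSet(0x1000, 0xf0f).contains(BigInt(0x1030)))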
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
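// staticHasData lets hasData() below collapse to a compile-time constant whenever the edge's
// parameters make the answer unambiguous: on an edge whose managers accept no Puts or atomics,
// every A-beat is data-less, so hasData(a) elaborates to false.B and logic gated on it
// constant-folds away in the generated Verilog; only when both data-carrying and data-less
// opcodes are legal does it fall back to decoding opcode bits at runtime, e.g. (signal names
// assumed):
//   when (bundle.a.fire && edge.hasData(bundle.a.bits)) { /* data/corrupt checks go here */ }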
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
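// These request/response/data tests work purely off opcode bits because of how TLMessages is
// encoded.  On the C channel, Release = 6 (110) and ReleaseData = 7 (111) are the only requests,
// so opcode(2) && opcode(1) identifies them; ProbeAck(Data) and AccessAck(Data) fall out as
// responses.  Likewise hasData is just opcode(0) on C and D, since AccessAckData = 001,
// ProbeAckData = 101, ReleaseData = 111 and GrantData = 101 all end in 1, which is why no
// lookup table is needed here.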
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
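The message constructors above (Get, Put, AcquireBlock, AccessAck, Grant, ...) each return a fully-populated channel bundle, and the request-side variants also return a legal Bool derived from the manager's (or client's) declared support for that operation at the given address and size. The following is a minimal, illustrative sketch (not part of the sources above) of how a diplomatic client might drive one of these constructors; the module name ExampleGetClient, the client name "example-get", and the 0x100 address are made up for the example, while edge.Get follows the TLEdgeOut API defined in this file.

// Illustrative sketch only: a one-shot TileLink client built on the edge helpers above.
import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

class ExampleGetClient(implicit p: Parameters) extends LazyModule {
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "example-get", sourceId = IdRange(0, 1))))))
  lazy val module = new LazyModuleImp(this) {
    val (tl, edge) = node.out(0)
    // edge.Get returns (legal, bits): legal reflects manager.supportsGetFast at this
    // address/size, bits is a fully-populated TLBundleA (opcode, mask, size, ...).
    val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = 0x100.U, lgSize = 3.U)
    val sent = RegInit(false.B)
    when (tl.a.fire) { sent := true.B }
    tl.a.valid := legal && !sent // issue the Get exactly once
    tl.a.bits  := getBits
    tl.d.ready := true.B         // sink the AccessAckData beats
  }
}

By default rocket-chip also attaches a TLMonitor to every such edge to check protocol invariants in simulation; the generated Verilog for one of those monitors, TLMonitor_14, follows.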
module TLMonitor_14( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [5:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [5:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [5:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [5:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire 
_c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 
1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59] wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14] wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27] wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25] wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21] wire [8:0] _c_opcodes_set_T = 9'h0; // @[Monitor.scala:767:79] wire [8:0] _c_sizes_set_T = 9'h0; // @[Monitor.scala:768:77] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_37 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_43 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_49 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_55 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:57:20] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28] wire [9:0] _c_first_counter1_T = 10'h3FF; // 
@[Edges.scala:230:28] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] 
_c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [5:0] _c_first_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _c_first_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] _c_first_WIRE_2_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _c_first_WIRE_3_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] _c_set_wo_ready_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _c_set_wo_ready_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] _c_set_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _c_set_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] _c_opcodes_set_interm_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _c_opcodes_set_interm_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] _c_sizes_set_interm_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _c_sizes_set_interm_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] _c_opcodes_set_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _c_opcodes_set_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] _c_sizes_set_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _c_sizes_set_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] _c_probe_ack_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _c_probe_ack_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] _c_probe_ack_WIRE_2_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _c_probe_ack_WIRE_3_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] _same_cycle_resp_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _same_cycle_resp_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] _same_cycle_resp_WIRE_2_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _same_cycle_resp_WIRE_3_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] _same_cycle_resp_WIRE_4_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] _same_cycle_resp_WIRE_5_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] 
_c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 
3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57] wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57] wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // 
@[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [515:0] _c_sizes_set_T_1 = 516'h0; // @[Monitor.scala:768:52] wire [514:0] _c_opcodes_set_T_1 = 515'h0; // @[Monitor.scala:767:54] wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59] wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40] wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [63:0] _c_set_wo_ready_T = 64'h1; // @[OneHot.scala:58:35] wire [63:0] _c_set_T = 64'h1; // @[OneHot.scala:58:35] wire [279:0] c_sizes_set = 280'h0; // @[Monitor.scala:741:34] wire [139:0] c_opcodes_set = 140'h0; // @[Monitor.scala:740:34] wire [34:0] c_set = 35'h0; // @[Monitor.scala:738:34] wire [34:0] c_set_wo_ready = 35'h0; // @[Monitor.scala:739:34] wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117] wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48] wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119] wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [5:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] 
_uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_1 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] 
wire [3:0] _source_ok_T_7 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_13 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_19 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire _source_ok_T_25 = io_in_a_bits_source_0 == 6'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31] wire _source_ok_T_26 = io_in_a_bits_source_0 == 6'h21; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31] wire _source_ok_T_27 = io_in_a_bits_source_0 == 6'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31] wire _source_ok_T_28 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_29 = _source_ok_T_28 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_30 = _source_ok_T_29 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_31 = _source_ok_T_30 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_32 = _source_ok_T_31 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_33 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71] wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // 
@[package.scala:243:{46,76}] wire [28:0] _is_aligned_T = {17'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] 
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] 
uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_34 = io_in_d_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_34; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_35 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_41 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_47 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_53 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire _source_ok_T_36 = _source_ok_T_35 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_38 = _source_ok_T_36; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_40; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_42 = _source_ok_T_41 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_44 = _source_ok_T_42; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_46; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_48 = 
_source_ok_T_47 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_50 = _source_ok_T_48; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_52; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_54 = _source_ok_T_53 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_56 = _source_ok_T_54; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_58; // @[Parameters.scala:1138:31] wire _source_ok_T_59 = io_in_d_bits_source_0 == 6'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_5 = _source_ok_T_59; // @[Parameters.scala:1138:31] wire _source_ok_T_60 = io_in_d_bits_source_0 == 6'h21; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_60; // @[Parameters.scala:1138:31] wire _source_ok_T_61 = io_in_d_bits_source_0 == 6'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_61; // @[Parameters.scala:1138:31] wire _source_ok_T_62 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_63 = _source_ok_T_62 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_64 = _source_ok_T_63 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_65 = _source_ok_T_64 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_66 = _source_ok_T_65 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_67 = _source_ok_T_66 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_67 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire _T_1524 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1524; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1524; // @[Decoupled.scala:51:35] wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [8:0] a_first_beats1 = a_first_beats1_opdata ? 
a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] a_first_counter; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [3:0] size; // @[Monitor.scala:389:22] reg [5:0] source; // @[Monitor.scala:390:22] reg [28:0] address; // @[Monitor.scala:391:22] wire _T_1597 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1597; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1597; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1597; // @[Decoupled.scala:51:35] wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [8:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg [5:0] source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [34:0] inflight; // @[Monitor.scala:614:27] reg [139:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [279:0] inflight_sizes; // @[Monitor.scala:618:33] wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] a_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [34:0] a_set; // @[Monitor.scala:626:34] wire [34:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [139:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [279:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [8:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [8:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [8:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [8:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [8:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [139:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [139:0] _a_opcode_lookup_T_6 = {136'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [139:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[139:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [7:0] a_size_lookup; // @[Monitor.scala:639:33] wire [8:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65] wire [8:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65] wire [8:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99] wire [8:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67] wire [8:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99] wire [279:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [279:0] _a_size_lookup_T_6 = {272'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}] wire [279:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[279:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = 
io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [63:0] _GEN_3 = 64'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [63:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_3; // @[OneHot.scala:58:35] wire [63:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_3; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[34:0] : 35'h0; // @[OneHot.scala:58:35] wire _T_1450 = _T_1524 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1450 ? _a_set_T[34:0] : 35'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1450 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1450 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [8:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [514:0] _a_opcodes_set_T_1 = {511'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1450 ? _a_opcodes_set_T_1[139:0] : 140'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [8:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77] wire [515:0] _a_sizes_set_T_1 = {511'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1450 ? _a_sizes_set_T_1[279:0] : 280'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [34:0] d_clr; // @[Monitor.scala:664:34] wire [34:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [139:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [279:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_1496 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [63:0] _GEN_5 = 64'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [63:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [63:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [63:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [63:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1496 & ~d_release_ack ? _d_clr_wo_ready_T[34:0] : 35'h0; // @[OneHot.scala:58:35] wire _T_1465 = _T_1597 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1465 ? _d_clr_T[34:0] : 35'h0; // @[OneHot.scala:58:35] wire [526:0] _d_opcodes_clr_T_5 = 527'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1465 ? 
_d_opcodes_clr_T_5[139:0] : 140'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [526:0] _d_sizes_clr_T_5 = 527'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1465 ? _d_sizes_clr_T_5[279:0] : 280'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [34:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [34:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [34:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [139:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [139:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [139:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [279:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [279:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [279:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [34:0] inflight_1; // @[Monitor.scala:726:35] wire [34:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [139:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [139:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [279:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [279:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_2; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [7:0] c_size_lookup; // @[Monitor.scala:748:35] wire [139:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [139:0] _c_opcode_lookup_T_6 = {136'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [139:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[139:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [279:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [279:0] _c_size_lookup_T_6 = {272'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}] wire [279:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[279:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [34:0] d_clr_1; // @[Monitor.scala:774:34] wire [34:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [139:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [279:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1568 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1568 & d_release_ack_1 ? _d_clr_wo_ready_T_1[34:0] : 35'h0; // @[OneHot.scala:58:35] wire _T_1550 = _T_1597 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1550 ? _d_clr_T_1[34:0] : 35'h0; // @[OneHot.scala:58:35] wire [526:0] _d_opcodes_clr_T_11 = 527'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1550 ? _d_opcodes_clr_T_11[139:0] : 140'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [526:0] _d_sizes_clr_T_11 = 527'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1550 ? _d_sizes_clr_T_11[279:0] : 280'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 6'h0; // @[Monitor.scala:36:7, :795:113] wire [34:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [34:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [139:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [139:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [279:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [279:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
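As an aside, the repeated a_first_* / d_first_* wires in the generated monitor above all follow one beat-tracking pattern: a counter that is zero on the first beat of a burst, reloads with (beats - 1) when a new burst starts, and counts down on every accepted beat. The stand-alone sketch below restates that pattern in Chisel for readability; the module name FirstBeatTracker and its port names are illustrative assumptions, not taken from the sources or the generated code above.

import chisel3._

// Simplified sketch of the beat-tracking pattern visible in the generated
// a_first_* / d_first_* wires: counter == 0 marks the first beat, the counter
// reloads with beats1 on the first accepted beat, and decrements afterwards.
class FirstBeatTracker(counterWidth: Int) extends Module {
  require(counterWidth > 0)
  val io = IO(new Bundle {
    val fire   = Input(Bool())                // this beat is accepted (ready && valid)
    val beats1 = Input(UInt(counterWidth.W))  // total beats of the burst, minus one
    val first  = Output(Bool())               // true on the first beat of a burst
    val last   = Output(Bool())               // true on the last beat of a burst
  })
  val counter = RegInit(0.U(counterWidth.W))
  io.first := counter === 0.U
  // last beat: the counter is about to reach zero, or the burst is single-beat
  io.last  := counter === 1.U || io.beats1 === 0.U
  when (io.fire) {
    counter := Mux(io.first, io.beats1, counter - 1.U)
  }
}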
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_19( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_39 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
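For context, a minimal usage sketch of the AsyncResetSynchronizerShiftReg companion object defined in SynchronizerReg.scala above: passing a Bool through the three-deep, asynchronously reset synchronizer elaborates a single-bit wrapper of the kind printed in the Verilog above. The module name SyncExample, its ports, and the suggested instance name are assumptions for illustration only.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Hypothetical call site for the companion-object apply shown above.
class SyncExample extends Module {
  val io = IO(new Bundle {
    val asyncIn = Input(Bool())   // bit sampled in another clock domain
    val syncOut = Output(Bool())  // same bit, synchronized into this clock domain
  })
  // Three flops deep, reset value 0: elaborates a wrapper analogous to the
  // AsyncResetSynchronizerShiftReg_w1_d3_i0 module printed above.
  io.syncOut := AsyncResetSynchronizerShiftReg(io.asyncIn, 3, 0, Some("sync_example"))
}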
Generate the Verilog code corresponding to the following Chisel files. File decode.scala: //****************************************************************************** // Copyright (c) 2015 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ package boom.v3.exu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket.Instructions32 import freechips.rocketchip.rocket.CustomInstructions._ import freechips.rocketchip.rocket.RVCExpander import freechips.rocketchip.rocket.{CSR,Causes} import freechips.rocketchip.util.{uintToBitPat,UIntIsOneOf} import FUConstants._ import boom.v3.common._ import boom.v3.util._ // scalastyle:off /** * Abstract trait giving defaults and other relevant values to different Decode constants/ */ abstract trait DecodeConstants extends freechips.rocketchip.rocket.constants.ScalarOpConstants with freechips.rocketchip.rocket.constants.MemoryOpConstants { val xpr64 = Y // TODO inform this from xLen val DC2 = BitPat.dontCare(2) // Makes the listing below more readable def decode_default: List[BitPat] = // frs3_en wakeup_delay // is val inst? | imm sel | bypassable (aka, known/fixed latency) // | is fp inst? | | uses_ldq | | is_br // | | is single-prec? rs1 regtype | | | uses_stq | | | // | | | micro-code | rs2 type| | | | is_amo | | | // | | | | iq-type func unit | | | | | | | is_fence | | | // | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall? // | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it) // | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit // | | | | | | | | | | | | | | | | | | | | | | | csr cmd // | | | | | | | | | | | | | | | | | | | | | | | | List(N, N, X, uopX , IQT_INT, FU_X , RT_X , DC2 ,DC2 ,X, IS_X, X, X, X, X, N, M_X, DC2, X, X, N, N, X, CSR.X) val table: Array[(BitPat, List[BitPat])] } // scalastyle:on /** * Decoded control signals */ class CtrlSigs extends Bundle { val legal = Bool() val fp_val = Bool() val fp_single = Bool() val uopc = UInt(UOPC_SZ.W) val iq_type = UInt(IQT_SZ.W) val fu_code = UInt(FUC_SZ.W) val dst_type = UInt(2.W) val rs1_type = UInt(2.W) val rs2_type = UInt(2.W) val frs3_en = Bool() val imm_sel = UInt(IS_X.getWidth.W) val uses_ldq = Bool() val uses_stq = Bool() val is_amo = Bool() val is_fence = Bool() val is_fencei = Bool() val mem_cmd = UInt(freechips.rocketchip.rocket.M_SZ.W) val wakeup_delay = UInt(2.W) val bypassable = Bool() val is_br = Bool() val is_sys_pc2epc = Bool() val inst_unique = Bool() val flush_on_commit = Bool() val csr_cmd = UInt(freechips.rocketchip.rocket.CSR.SZ.W) val rocc = Bool() def decode(inst: UInt, table: Iterable[(BitPat, List[BitPat])]) = { val decoder = freechips.rocketchip.rocket.DecodeLogic(inst, XDecode.decode_default, table) val sigs = Seq(legal, fp_val, fp_single, uopc, iq_type, fu_code, dst_type, rs1_type, rs2_type, frs3_en, imm_sel, uses_ldq, uses_stq, is_amo, is_fence, is_fencei, mem_cmd, wakeup_delay, bypassable, is_br, is_sys_pc2epc, inst_unique, flush_on_commit, csr_cmd) sigs zip decoder map {case(s,d) => s := d} rocc := false.B this } } // scalastyle:off /** * Decode constants for RV32 */ object X32Decode extends DecodeConstants { // frs3_en wakeup_delay // is val inst? | imm sel | bypassable (aka, known/fixed latency) // | is fp inst? 
| | uses_ldq | | is_br // | | is single-prec? rs1 regtype | | | uses_stq | | | // | | | micro-code | rs2 type| | | | is_amo | | | // | | | | iq-type func unit | | | | | | | is_fence | | | // | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall? // | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it) // | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit // | | | | | | | | | | | | | | | | | | | | | | | csr cmd val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | | Instructions32.SLLI -> List(Y, N, X, uopSLLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), Instructions32.SRLI -> List(Y, N, X, uopSRLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), Instructions32.SRAI -> List(Y, N, X, uopSRAI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N) ) } /** * Decode constants for RV64 */ object X64Decode extends DecodeConstants { // frs3_en wakeup_delay // is val inst? | imm sel | bypassable (aka, known/fixed latency) // | is fp inst? | | uses_ldq | | is_br // | | is single-prec? rs1 regtype | | | uses_stq | | | // | | | micro-code | rs2 type| | | | is_amo | | | // | | | | iq-type func unit | | | | | | | is_fence | | | // | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall? // | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it) // | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit // | | | | | | | | | | | | | | | | | | | | | | | csr cmd val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | | LD -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N), LWU -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N), SD -> List(Y, N, X, uopSTA , IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N), SLLI -> List(Y, N, X, uopSLLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SRLI -> List(Y, N, X, uopSRLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SRAI -> List(Y, N, X, uopSRAI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), ADDIW -> List(Y, N, X, uopADDIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SLLIW -> List(Y, N, X, uopSLLIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SRAIW -> List(Y, N, X, uopSRAIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SRLIW -> List(Y, N, X, uopSRLIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), ADDW -> List(Y, N, X, uopADDW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SUBW -> List(Y, N, X, uopSUBW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SLLW -> List(Y, N, X, uopSLLW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SRAW -> List(Y, N, X, uopSRAW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 
1.U, Y, N, N, N, N, CSR.N), SRLW -> List(Y, N, X, uopSRLW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N) ) } /** * Overall Decode constants */ object XDecode extends DecodeConstants { // frs3_en wakeup_delay // is val inst? | imm sel | bypassable (aka, known/fixed latency) // | is fp inst? | | uses_ldq | | is_br // | | is single-prec? rs1 regtype | | | uses_stq | | | // | | | micro-code | rs2 type| | | | is_amo | | | // | | | | iq-type func unit | | | | | | | is_fence | | | // | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall? // | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it) // | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit // | | | | | | | | | | | | | | | | | | | | | | | csr cmd val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | | LW -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N), LH -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N), LHU -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N), LB -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N), LBU -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N), SW -> List(Y, N, X, uopSTA , IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N), SH -> List(Y, N, X, uopSTA , IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N), SB -> List(Y, N, X, uopSTA , IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N), LUI -> List(Y, N, X, uopLUI , IQT_INT, FU_ALU , RT_FIX, RT_X , RT_X , N, IS_U, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), ADDI -> List(Y, N, X, uopADDI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), ANDI -> List(Y, N, X, uopANDI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), ORI -> List(Y, N, X, uopORI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), XORI -> List(Y, N, X, uopXORI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SLTI -> List(Y, N, X, uopSLTI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SLTIU -> List(Y, N, X, uopSLTIU, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SLL -> List(Y, N, X, uopSLL , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), ADD -> List(Y, N, X, uopADD , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SUB -> List(Y, N, X, uopSUB , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SLT -> List(Y, N, X, uopSLT , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SLTU -> List(Y, N, X, uopSLTU , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), AND -> List(Y, N, X, uopAND , IQT_INT, FU_ALU , RT_FIX, 
RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), OR -> List(Y, N, X, uopOR , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), XOR -> List(Y, N, X, uopXOR , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SRA -> List(Y, N, X, uopSRA , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), SRL -> List(Y, N, X, uopSRL , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N), MUL -> List(Y, N, X, uopMUL , IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), MULH -> List(Y, N, X, uopMULH , IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), MULHU -> List(Y, N, X, uopMULHU, IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), MULHSU -> List(Y, N, X, uopMULHSU,IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), MULW -> List(Y, N, X, uopMULW , IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), DIV -> List(Y, N, X, uopDIV , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), DIVU -> List(Y, N, X, uopDIVU , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), REM -> List(Y, N, X, uopREM , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), REMU -> List(Y, N, X, uopREMU , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), DIVW -> List(Y, N, X, uopDIVW , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), DIVUW -> List(Y, N, X, uopDIVUW, IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), REMW -> List(Y, N, X, uopREMW , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), REMUW -> List(Y, N, X, uopREMUW, IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), AUIPC -> List(Y, N, X, uopAUIPC, IQT_INT, FU_JMP , RT_FIX, RT_X , RT_X , N, IS_U, N, N, N, N, N, M_X , 1.U, N, N, N, N, N, CSR.N), // use BRU for the PC read JAL -> List(Y, N, X, uopJAL , IQT_INT, FU_JMP , RT_FIX, RT_X , RT_X , N, IS_J, N, N, N, N, N, M_X , 1.U, N, N, N, N, N, CSR.N), JALR -> List(Y, N, X, uopJALR , IQT_INT, FU_JMP , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, N, N, N, N, N, CSR.N), BEQ -> List(Y, N, X, uopBEQ , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N), BNE -> List(Y, N, X, uopBNE , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N), BGE -> List(Y, N, X, uopBGE , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N), BGEU -> List(Y, N, X, uopBGEU , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N), BLT -> List(Y, N, X, uopBLT , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N), BLTU -> List(Y, N, X, uopBLTU , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N), // I-type, the immediate12 holds the CSR register. 
CSRRW -> List(Y, N, X, uopCSRRW, IQT_INT, FU_CSR , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.W), CSRRS -> List(Y, N, X, uopCSRRS, IQT_INT, FU_CSR , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.S), CSRRC -> List(Y, N, X, uopCSRRC, IQT_INT, FU_CSR , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.C), CSRRWI -> List(Y, N, X, uopCSRRWI,IQT_INT, FU_CSR , RT_FIX, RT_PAS, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.W), CSRRSI -> List(Y, N, X, uopCSRRSI,IQT_INT, FU_CSR , RT_FIX, RT_PAS, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.S), CSRRCI -> List(Y, N, X, uopCSRRCI,IQT_INT, FU_CSR , RT_FIX, RT_PAS, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.C), SFENCE_VMA->List(Y,N, X, uopSFENCE,IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N,M_SFENCE,0.U,N, N, N, Y, Y, CSR.N), ECALL -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, Y, Y, Y, CSR.I), EBREAK -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, Y, Y, Y, CSR.I), SRET -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.I), MRET -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.I), DRET -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.I), WFI -> List(Y, N, X, uopWFI ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.I), FENCE_I -> List(Y, N, X, uopNOP , IQT_INT, FU_X , RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, Y, M_X , 0.U, N, N, N, Y, Y, CSR.N), FENCE -> List(Y, N, X, uopFENCE, IQT_INT, FU_MEM , RT_X , RT_X , RT_X , N, IS_X, N, Y, N, Y, N, M_X , 0.U, N, N, N, Y, Y, CSR.N), // TODO PERF make fence higher performance // currently serializes pipeline // frs3_en wakeup_delay // is val inst? | imm sel | bypassable (aka, known/fixed latency) // | is fp inst? | | uses_ldq | | is_br // | | is single-prec? rs1 regtype | | | uses_stq | | | // | | | micro-code | rs2 type| | | | is_amo | | | // | | | | iq-type func unit | | | | | | | is_fence | | | // | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall? // | | | | | | dst | | | | | | | | | mem | | | | is unique? 
(clear pipeline for it) // | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit // | | | | | | | | | | | | | | | | | | | | | | | csr cmd // A-type | | | | | | | | | | | | | | | | | | | | | | | | AMOADD_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_ADD, 0.U,N, N, N, Y, Y, CSR.N), // TODO make AMOs higherperformance AMOXOR_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_XOR, 0.U,N, N, N, Y, Y, CSR.N), AMOSWAP_W->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_SWAP,0.U,N, N, N, Y, Y, CSR.N), AMOAND_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_AND, 0.U,N, N, N, Y, Y, CSR.N), AMOOR_W -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_OR, 0.U,N, N, N, Y, Y, CSR.N), AMOMIN_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MIN, 0.U,N, N, N, Y, Y, CSR.N), AMOMINU_W->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MINU,0.U,N, N, N, Y, Y, CSR.N), AMOMAX_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAX, 0.U,N, N, N, Y, Y, CSR.N), AMOMAXU_W->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAXU,0.U,N, N, N, Y, Y, CSR.N), AMOADD_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_ADD, 0.U,N, N, N, Y, Y, CSR.N), AMOXOR_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_XOR, 0.U,N, N, N, Y, Y, CSR.N), AMOSWAP_D->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_SWAP,0.U,N, N, N, Y, Y, CSR.N), AMOAND_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_AND, 0.U,N, N, N, Y, Y, CSR.N), AMOOR_D -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_OR, 0.U,N, N, N, Y, Y, CSR.N), AMOMIN_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MIN, 0.U,N, N, N, Y, Y, CSR.N), AMOMINU_D->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MINU,0.U,N, N, N, Y, Y, CSR.N), AMOMAX_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAX, 0.U,N, N, N, Y, Y, CSR.N), AMOMAXU_D->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAXU,0.U,N, N, N, Y, Y, CSR.N), LR_W -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_X , N, IS_X, Y, N, N, N, N, M_XLR , 0.U,N, N, N, Y, Y, CSR.N), LR_D -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_X , N, IS_X, Y, N, N, N, N, M_XLR , 0.U,N, N, N, Y, Y, CSR.N), SC_W -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XSC , 0.U,N, N, N, Y, Y, CSR.N), SC_D -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XSC , 0.U,N, N, N, Y, Y, CSR.N) ) } /** * FP Decode constants */ object FDecode extends DecodeConstants { val table: Array[(BitPat, List[BitPat])] = Array( // frs3_en wakeup_delay // | imm sel | bypassable (aka, known/fixed latency) // | | uses_ldq | | is_br // is val inst? rs1 regtype | | | uses_stq | | | // | is fp inst? 
| rs2 type| | | | is_amo | | | // | | is dst single-prec? | | | | | | | is_fence | | | // | | | micro-opcode | | | | | | | | is_fencei | | | is breakpoint or ecall // | | | | iq_type func dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it) // | | | | | unit regtype | | | | | | | | | cmd | | | | | flush on commit // | | | | | | | | | | | | | | | | | | | | | | | csr cmd FLW -> List(Y, Y, Y, uopLD , IQT_MEM, FU_MEM, RT_FLT, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 0.U, N, N, N, N, N, CSR.N), FLD -> List(Y, Y, N, uopLD , IQT_MEM, FU_MEM, RT_FLT, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 0.U, N, N, N, N, N, CSR.N), FSW -> List(Y, Y, Y, uopSTA , IQT_MFP,FU_F2IMEM,RT_X , RT_FIX, RT_FLT, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N), // sort of a lie; broken into two micro-ops FSD -> List(Y, Y, N, uopSTA , IQT_MFP,FU_F2IMEM,RT_X , RT_FIX, RT_FLT, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N), FCLASS_S-> List(Y, Y, Y, uopFCLASS_S,IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCLASS_D-> List(Y, Y, N, uopFCLASS_D,IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMV_W_X -> List(Y, Y, Y, uopFMV_W_X, IQT_INT, FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMV_D_X -> List(Y, Y, N, uopFMV_D_X, IQT_INT, FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMV_X_W -> List(Y, Y, Y, uopFMV_X_W, IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMV_X_D -> List(Y, Y, N, uopFMV_X_D, IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FSGNJ_S -> List(Y, Y, Y, uopFSGNJ_S, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FSGNJ_D -> List(Y, Y, N, uopFSGNJ_D, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FSGNJX_S-> List(Y, Y, Y, uopFSGNJ_S, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FSGNJX_D-> List(Y, Y, N, uopFSGNJ_D, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FSGNJN_S-> List(Y, Y, Y, uopFSGNJ_S, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FSGNJN_D-> List(Y, Y, N, uopFSGNJ_D, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), // FP to FP FCVT_S_D-> List(Y, Y, Y, uopFCVT_S_D,IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_D_S-> List(Y, Y, N, uopFCVT_D_S,IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), // Int to FP FCVT_S_W-> List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_S_WU->List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_S_L-> List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_S_LU->List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_D_W-> List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), 
FCVT_D_WU->List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_D_L-> List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_D_LU->List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), // FP to Int FCVT_W_S-> List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_WU_S->List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_L_S-> List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_LU_S->List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_W_D-> List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_WU_D->List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_L_D-> List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FCVT_LU_D->List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), // "fp_single" is used for wb_data formatting (and debugging) FEQ_S ->List(Y, Y, Y, uopCMPR_S , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FLT_S ->List(Y, Y, Y, uopCMPR_S , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FLE_S ->List(Y, Y, Y, uopCMPR_S , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FEQ_D ->List(Y, Y, N, uopCMPR_D , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FLT_D ->List(Y, Y, N, uopCMPR_D , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FLE_D ->List(Y, Y, N, uopCMPR_D , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMIN_S ->List(Y, Y, Y,uopFMINMAX_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMAX_S ->List(Y, Y, Y,uopFMINMAX_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMIN_D ->List(Y, Y, N,uopFMINMAX_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMAX_D ->List(Y, Y, N,uopFMINMAX_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FADD_S ->List(Y, Y, Y, uopFADD_S , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FSUB_S ->List(Y, Y, Y, uopFSUB_S , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMUL_S ->List(Y, Y, Y, uopFMUL_S , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FADD_D ->List(Y, Y, N, uopFADD_D , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FSUB_D ->List(Y, Y, N, uopFSUB_D , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), 
FMUL_D ->List(Y, Y, N, uopFMUL_D , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMADD_S ->List(Y, Y, Y, uopFMADD_S, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMSUB_S ->List(Y, Y, Y, uopFMSUB_S, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FNMADD_S ->List(Y, Y, Y, uopFNMADD_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FNMSUB_S ->List(Y, Y, Y, uopFNMSUB_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMADD_D ->List(Y, Y, N, uopFMADD_D, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FMSUB_D ->List(Y, Y, N, uopFMSUB_D, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FNMADD_D ->List(Y, Y, N, uopFNMADD_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FNMSUB_D ->List(Y, Y, N, uopFNMSUB_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N) ) } /** * FP Divide SquareRoot Constants */ object FDivSqrtDecode extends DecodeConstants { val table: Array[(BitPat, List[BitPat])] = Array( // frs3_en wakeup_delay // | imm sel | bypassable (aka, known/fixed latency) // | | uses_ldq | | is_br // is val inst? rs1 regtype | | | uses_stq | | | // | is fp inst? | rs2 type| | | | is_amo | | | // | | is dst single-prec? | | | | | | | is_fence | | | // | | | micro-opcode | | | | | | | | is_fencei | | | is breakpoint or ecall // | | | | iq-type func dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it) // | | | | | unit regtype | | | | | | | | | cmd | | | | | flush on commit // | | | | | | | | | | | | | | | | | | | | | | | csr cmd FDIV_S ->List(Y, Y, Y, uopFDIV_S , IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FDIV_D ->List(Y, Y, N, uopFDIV_D , IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FSQRT_S ->List(Y, Y, Y, uopFSQRT_S, IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), FSQRT_D ->List(Y, Y, N, uopFSQRT_D, IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N) ) } //scalastyle:on /** * RoCC initial decode */ object RoCCDecode extends DecodeConstants { // Note: We use FU_CSR since CSR instructions cannot co-execute with RoCC instructions // frs3_en wakeup_delay // is val inst? | imm sel | bypassable (aka, known/fixed latency) // | is fp inst? | | uses_ldq | | is_br // | | is single-prec rs1 regtype | | | uses_stq | | | // | | | | rs2 type| | | | is_amo | | | // | | | micro-code func unit | | | | | | | is_fence | | | // | | | | iq-type | | | | | | | | | is_fencei | | | is breakpoint or ecall? // | | | | | | dst | | | | | | | | | mem | | | | is unique? 
(clear pipeline for it) // | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit // | | | | | | | | | | | | | | | | | | | | | | | csr cmd // | | | | | | | | | | | | | | | | | | | | | | | | val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | | | CUSTOM0 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM0_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM0_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM0_RD ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM0_RD_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM0_RD_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM1_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM1_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM1_RD ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM1_RD_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM1_RD_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM2_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM2_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM2_RD ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM2_RD_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM2_RD_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM3 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM3_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM3_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM3_RD ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM3_RD_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N), CUSTOM3_RD_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 
0.U, N, N, N, N, N, CSR.N) ) } /** * IO bundle for the Decode unit */ class DecodeUnitIo(implicit p: Parameters) extends BoomBundle { val enq = new Bundle { val uop = Input(new MicroOp()) } val deq = new Bundle { val uop = Output(new MicroOp()) } // from CSRFile val status = Input(new freechips.rocketchip.rocket.MStatus()) val csr_decode = Flipped(new freechips.rocketchip.rocket.CSRDecodeIO) val interrupt = Input(Bool()) val interrupt_cause = Input(UInt(xLen.W)) } /** * Decode unit that takes in a single instruction and generates a MicroOp. */ class DecodeUnit(implicit p: Parameters) extends BoomModule with freechips.rocketchip.rocket.constants.MemoryOpConstants { val io = IO(new DecodeUnitIo) val uop = Wire(new MicroOp()) uop := io.enq.uop var decode_table = XDecode.table if (usingFPU) decode_table ++= FDecode.table if (usingFPU && usingFDivSqrt) decode_table ++= FDivSqrtDecode.table if (usingRoCC) decode_table ++= RoCCDecode.table decode_table ++= (if (xLen == 64) X64Decode.table else X32Decode.table) val inst = uop.inst val cs = Wire(new CtrlSigs()).decode(inst, decode_table) // Exception Handling io.csr_decode.inst := inst val csr_en = cs.csr_cmd.isOneOf(CSR.S, CSR.C, CSR.W) val csr_ren = cs.csr_cmd.isOneOf(CSR.S, CSR.C) && uop.lrs1 === 0.U val system_insn = cs.csr_cmd === CSR.I val sfence = cs.uopc === uopSFENCE val cs_legal = cs.legal // dontTouch(cs_legal) val id_illegal_insn = !cs_legal || cs.fp_val && io.csr_decode.fp_illegal || // TODO check for illegal rm mode: (io.fpu.illegal_rm) cs.rocc && io.csr_decode.rocc_illegal || cs.is_amo && !io.status.isa('a'-'a') || (cs.fp_val && !cs.fp_single) && !io.status.isa('d'-'a') || csr_en && (io.csr_decode.read_illegal || !csr_ren && io.csr_decode.write_illegal) || ((sfence || system_insn) && io.csr_decode.system_illegal) // cs.div && !csr.io.status.isa('m'-'a') || TODO check for illegal div instructions def checkExceptions(x: Seq[(Bool, UInt)]) = (x.map(_._1).reduce(_||_), PriorityMux(x)) val (xcpt_valid, xcpt_cause) = checkExceptions(List( (io.interrupt && !io.enq.uop.is_sfb, io.interrupt_cause), // Disallow interrupts while we are handling a SFB (uop.bp_debug_if, (CSR.debugTriggerCause).U), (uop.bp_xcpt_if, (Causes.breakpoint).U), (uop.xcpt_pf_if, (Causes.fetch_page_fault).U), (uop.xcpt_ae_if, (Causes.fetch_access).U), (id_illegal_insn, (Causes.illegal_instruction).U))) uop.exception := xcpt_valid uop.exc_cause := xcpt_cause //------------------------------------------------------------- uop.uopc := cs.uopc uop.iq_type := cs.iq_type uop.fu_code := cs.fu_code // x-registers placed in 0-31, f-registers placed in 32-63. // This allows us to straight-up compare register specifiers and not need to // verify the rtypes (e.g., bypassing in rename). 
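  // (The field positions RD_MSB/RD_LSB, RS1_*, RS2_* and RS3_* used below are the
  // fixed RISC-V encoding slices defined by the RISCVConstants trait in consts.scala,
  // e.g. rd = inst(11,7), rs1 = inst(19,15), rs2 = inst(24,20).)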
uop.ldst := inst(RD_MSB,RD_LSB) uop.lrs1 := inst(RS1_MSB,RS1_LSB) uop.lrs2 := inst(RS2_MSB,RS2_LSB) uop.lrs3 := inst(RS3_MSB,RS3_LSB) uop.ldst_val := cs.dst_type =/= RT_X && !(uop.ldst === 0.U && uop.dst_rtype === RT_FIX) uop.dst_rtype := cs.dst_type uop.lrs1_rtype := cs.rs1_type uop.lrs2_rtype := cs.rs2_type uop.frs3_en := cs.frs3_en uop.ldst_is_rs1 := uop.is_sfb_shadow // SFB optimization when (uop.is_sfb_shadow && cs.rs2_type === RT_X) { uop.lrs2_rtype := RT_FIX uop.lrs2 := inst(RD_MSB,RD_LSB) uop.ldst_is_rs1 := false.B } .elsewhen (uop.is_sfb_shadow && cs.uopc === uopADD && inst(RS1_MSB,RS1_LSB) === 0.U) { uop.uopc := uopMOV uop.lrs1 := inst(RD_MSB, RD_LSB) uop.ldst_is_rs1 := true.B } when (uop.is_sfb_br) { uop.fu_code := FU_JMP } uop.fp_val := cs.fp_val uop.fp_single := cs.fp_single // TODO use this signal instead of the FPU decode's table signal? uop.mem_cmd := cs.mem_cmd uop.mem_size := Mux(cs.mem_cmd.isOneOf(M_SFENCE, M_FLUSH_ALL), Cat(uop.lrs2 =/= 0.U, uop.lrs1 =/= 0.U), inst(13,12)) uop.mem_signed := !inst(14) uop.uses_ldq := cs.uses_ldq uop.uses_stq := cs.uses_stq uop.is_amo := cs.is_amo uop.is_fence := cs.is_fence uop.is_fencei := cs.is_fencei uop.is_sys_pc2epc := cs.is_sys_pc2epc uop.is_unique := cs.inst_unique uop.flush_on_commit := cs.flush_on_commit || (csr_en && !csr_ren && io.csr_decode.write_flush) uop.bypassable := cs.bypassable //------------------------------------------------------------- // immediates // repackage the immediate, and then pass the fewest number of bits around val di24_20 = Mux(cs.imm_sel === IS_B || cs.imm_sel === IS_S, inst(11,7), inst(24,20)) uop.imm_packed := Cat(inst(31,25), di24_20, inst(19,12)) //------------------------------------------------------------- uop.is_br := cs.is_br uop.is_jal := (uop.uopc === uopJAL) uop.is_jalr := (uop.uopc === uopJALR) // uop.is_jump := cs.is_jal || (uop.uopc === uopJALR) // uop.is_ret := (uop.uopc === uopJALR) && // (uop.ldst === X0) && // (uop.lrs1 === RA) // uop.is_call := (uop.uopc === uopJALR || uop.uopc === uopJAL) && // (uop.ldst === RA) //------------------------------------------------------------- io.deq.uop := uop } /** * Smaller Decode unit for the Frontend to decode different * branches. * Accepts EXPANDED RVC instructions */ class BranchDecodeSignals(implicit p: Parameters) extends BoomBundle { val is_ret = Bool() val is_call = Bool() val target = UInt(vaddrBitsExtended.W) val cfi_type = UInt(CFI_SZ.W) // Is this branch a short forwards jump? val sfb_offset = Valid(UInt(log2Ceil(icBlockBytes).W)) // Is this instruction allowed to be inside a sfb? val shadowable = Bool() } class BranchDecode(implicit p: Parameters) extends BoomModule { val io = IO(new Bundle { val inst = Input(UInt(32.W)) val pc = Input(UInt(vaddrBitsExtended.W)) val out = Output(new BranchDecodeSignals) }) val bpd_csignals = freechips.rocketchip.rocket.DecodeLogic(io.inst, List[BitPat](N, N, N, N, X), //// is br? //// | is jal? //// | | is jalr? 
//// | | | //// | | | shadowable //// | | | | has_rs2 //// | | | | | Array[(BitPat, List[BitPat])]( JAL -> List(N, Y, N, N, X), JALR -> List(N, N, Y, N, X), BEQ -> List(Y, N, N, N, X), BNE -> List(Y, N, N, N, X), BGE -> List(Y, N, N, N, X), BGEU -> List(Y, N, N, N, X), BLT -> List(Y, N, N, N, X), BLTU -> List(Y, N, N, N, X), SLLI -> List(N, N, N, Y, N), SRLI -> List(N, N, N, Y, N), SRAI -> List(N, N, N, Y, N), ADDIW -> List(N, N, N, Y, N), SLLIW -> List(N, N, N, Y, N), SRAIW -> List(N, N, N, Y, N), SRLIW -> List(N, N, N, Y, N), ADDW -> List(N, N, N, Y, Y), SUBW -> List(N, N, N, Y, Y), SLLW -> List(N, N, N, Y, Y), SRAW -> List(N, N, N, Y, Y), SRLW -> List(N, N, N, Y, Y), LUI -> List(N, N, N, Y, N), ADDI -> List(N, N, N, Y, N), ANDI -> List(N, N, N, Y, N), ORI -> List(N, N, N, Y, N), XORI -> List(N, N, N, Y, N), SLTI -> List(N, N, N, Y, N), SLTIU -> List(N, N, N, Y, N), SLL -> List(N, N, N, Y, Y), ADD -> List(N, N, N, Y, Y), SUB -> List(N, N, N, Y, Y), SLT -> List(N, N, N, Y, Y), SLTU -> List(N, N, N, Y, Y), AND -> List(N, N, N, Y, Y), OR -> List(N, N, N, Y, Y), XOR -> List(N, N, N, Y, Y), SRA -> List(N, N, N, Y, Y), SRL -> List(N, N, N, Y, Y) )) val cs_is_br = bpd_csignals(0)(0) val cs_is_jal = bpd_csignals(1)(0) val cs_is_jalr = bpd_csignals(2)(0) val cs_is_shadowable = bpd_csignals(3)(0) val cs_has_rs2 = bpd_csignals(4)(0) io.out.is_call := (cs_is_jal || cs_is_jalr) && GetRd(io.inst) === RA io.out.is_ret := cs_is_jalr && GetRs1(io.inst) === BitPat("b00?01") && GetRd(io.inst) === X0 io.out.target := Mux(cs_is_br, ComputeBranchTarget(io.pc, io.inst, xLen), ComputeJALTarget(io.pc, io.inst, xLen)) io.out.cfi_type := Mux(cs_is_jalr, CFI_JALR, Mux(cs_is_jal, CFI_JAL, Mux(cs_is_br, CFI_BR, CFI_X))) val br_offset = Cat(io.inst(7), io.inst(30,25), io.inst(11,8), 0.U(1.W)) // Is a sfb if it points forwards (offset is positive) io.out.sfb_offset.valid := cs_is_br && !io.inst(31) && br_offset =/= 0.U && (br_offset >> log2Ceil(icBlockBytes)) === 0.U io.out.sfb_offset.bits := br_offset io.out.shadowable := cs_is_shadowable && ( !cs_has_rs2 || (GetRs1(io.inst) === GetRd(io.inst)) || (io.inst === ADD && GetRs1(io.inst) === X0) ) } /** * Track the current "branch mask", and give out the branch mask to each micro-op in Decode * (each micro-op in the machine has a branch mask which says which branches it * is being speculated under). * * @param pl_width pipeline width for the processor */ class BranchMaskGenerationLogic(val pl_width: Int)(implicit p: Parameters) extends BoomModule { val io = IO(new Bundle { // guess if the uop is a branch (we'll catch this later) val is_branch = Input(Vec(pl_width, Bool())) // lock in that it's actually a branch and will fire, so we update // the branch_masks. 
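    // (is_branch above is only decode's guess and is used to reserve a tag for the
    // lane; will_fire is the confirmation, and only confirmed branches are folded
    // into the persistent branch_mask register further below.)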
val will_fire = Input(Vec(pl_width, Bool())) // give out tag immediately (needed in rename) // mask can come later in the cycle val br_tag = Output(Vec(pl_width, UInt(brTagSz.W))) val br_mask = Output(Vec(pl_width, UInt(maxBrCount.W))) // tell decoders the branch mask has filled up, but on the granularity // of an individual micro-op (so some micro-ops can go through) val is_full = Output(Vec(pl_width, Bool())) val brupdate = Input(new BrUpdateInfo()) val flush_pipeline = Input(Bool()) val debug_branch_mask = Output(UInt(maxBrCount.W)) }) val branch_mask = RegInit(0.U(maxBrCount.W)) //------------------------------------------------------------- // Give out the branch tag to each branch micro-op var allocate_mask = branch_mask val tag_masks = Wire(Vec(pl_width, UInt(maxBrCount.W))) for (w <- 0 until pl_width) { // TODO this is a loss of performance as we're blocking branches based on potentially fake branches io.is_full(w) := (allocate_mask === ~(0.U(maxBrCount.W))) && io.is_branch(w) // find br_tag and compute next br_mask val new_br_tag = Wire(UInt(brTagSz.W)) new_br_tag := 0.U tag_masks(w) := 0.U for (i <- maxBrCount-1 to 0 by -1) { when (~allocate_mask(i)) { new_br_tag := i.U tag_masks(w) := (1.U << i.U) } } io.br_tag(w) := new_br_tag allocate_mask = Mux(io.is_branch(w), tag_masks(w) | allocate_mask, allocate_mask) } //------------------------------------------------------------- // Give out the branch mask to each micro-op // (kill off the bits that corresponded to branches that aren't going to fire) var curr_mask = branch_mask for (w <- 0 until pl_width) { io.br_mask(w) := GetNewBrMask(io.brupdate, curr_mask) curr_mask = Mux(io.will_fire(w), tag_masks(w) | curr_mask, curr_mask) } //------------------------------------------------------------- // Update the current branch_mask when (io.flush_pipeline) { branch_mask := 0.U } .otherwise { val mask = Mux(io.brupdate.b2.mispredict, io.brupdate.b2.uop.br_mask, ~(0.U(maxBrCount.W))) branch_mask := GetNewBrMask(io.brupdate, curr_mask) & mask } io.debug_branch_mask := branch_mask } File consts.scala: //****************************************************************************** // Copyright (c) 2011 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. 
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Constants //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.common.constants import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util.Str import freechips.rocketchip.rocket.RVCExpander /** * Mixin for issue queue types */ trait IQType { val IQT_SZ = 3 val IQT_INT = 1.U(IQT_SZ.W) val IQT_MEM = 2.U(IQT_SZ.W) val IQT_FP = 4.U(IQT_SZ.W) val IQT_MFP = 6.U(IQT_SZ.W) } /** * Mixin for scalar operation constants */ trait ScalarOpConstants { val X = BitPat("b?") val Y = BitPat("b1") val N = BitPat("b0") //************************************ // Extra Constants // Which branch predictor predicted us val BSRC_SZ = 2 val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred val BSRC_C = 3.U(BSRC_SZ.W) // core branch resolution //************************************ // Control Signals // CFI types val CFI_SZ = 3 val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction val CFI_BR = 1.U(CFI_SZ.W) // Branch val CFI_JAL = 2.U(CFI_SZ.W) // JAL val CFI_JALR = 3.U(CFI_SZ.W) // JALR // PC Select Signal val PC_PLUS4 = 0.U(2.W) // PC + 4 val PC_BRJMP = 1.U(2.W) // brjmp_target val PC_JALR = 2.U(2.W) // jump_reg_target // Branch Type val BR_N = 0.U(4.W) // Next val BR_NE = 1.U(4.W) // Branch on NotEqual val BR_EQ = 2.U(4.W) // Branch on Equal val BR_GE = 3.U(4.W) // Branch on Greater/Equal val BR_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned val BR_LT = 5.U(4.W) // Branch on Less Than val BR_LTU = 6.U(4.W) // Branch on Less Than Unsigned val BR_J = 7.U(4.W) // Jump val BR_JR = 8.U(4.W) // Jump Register // RS1 Operand Select Signal val OP1_RS1 = 0.U(2.W) // Register Source #1 val OP1_ZERO= 1.U(2.W) val OP1_PC = 2.U(2.W) val OP1_X = BitPat("b??") // RS2 Operand Select Signal val OP2_RS2 = 0.U(3.W) // Register Source #2 val OP2_IMM = 1.U(3.W) // immediate val OP2_ZERO= 2.U(3.W) // constant 0 val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4) val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1 val OP2_X = BitPat("b???") // Register File Write Enable Signal val REN_0 = false.B val REN_1 = true.B // Is 32b Word or 64b Doubldword? val SZ_DW = 1 val DW_X = true.B // Bool(xLen==64) val DW_32 = false.B val DW_64 = true.B val DW_XPR = true.B // Bool(xLen==64) // Memory Enable Signal val MEN_0 = false.B val MEN_1 = true.B val MEN_X = false.B // Immediate Extend Select val IS_I = 0.U(3.W) // I-Type (LD,ALU) val IS_S = 1.U(3.W) // S-Type (ST) val IS_B = 2.U(3.W) // SB-Type (BR) val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC) val IS_J = 4.U(3.W) // UJ-Type (J/JAL) val IS_X = BitPat("b???") // Decode Stage Control Signals val RT_FIX = 0.U(2.W) val RT_FLT = 1.U(2.W) val RT_PAS = 3.U(2.W) // pass-through (prs1 := lrs1, etc) val RT_X = 2.U(2.W) // not-a-register (but shouldn't get a busy-bit, etc.) 
// TODO rename RT_NAR // Micro-op opcodes // TODO change micro-op opcodes into using enum val UOPC_SZ = 7 val uopX = BitPat.dontCare(UOPC_SZ) val uopNOP = 0.U(UOPC_SZ.W) val uopLD = 1.U(UOPC_SZ.W) val uopSTA = 2.U(UOPC_SZ.W) // store address generation val uopSTD = 3.U(UOPC_SZ.W) // store data generation val uopLUI = 4.U(UOPC_SZ.W) val uopADDI = 5.U(UOPC_SZ.W) val uopANDI = 6.U(UOPC_SZ.W) val uopORI = 7.U(UOPC_SZ.W) val uopXORI = 8.U(UOPC_SZ.W) val uopSLTI = 9.U(UOPC_SZ.W) val uopSLTIU= 10.U(UOPC_SZ.W) val uopSLLI = 11.U(UOPC_SZ.W) val uopSRAI = 12.U(UOPC_SZ.W) val uopSRLI = 13.U(UOPC_SZ.W) val uopSLL = 14.U(UOPC_SZ.W) val uopADD = 15.U(UOPC_SZ.W) val uopSUB = 16.U(UOPC_SZ.W) val uopSLT = 17.U(UOPC_SZ.W) val uopSLTU = 18.U(UOPC_SZ.W) val uopAND = 19.U(UOPC_SZ.W) val uopOR = 20.U(UOPC_SZ.W) val uopXOR = 21.U(UOPC_SZ.W) val uopSRA = 22.U(UOPC_SZ.W) val uopSRL = 23.U(UOPC_SZ.W) val uopBEQ = 24.U(UOPC_SZ.W) val uopBNE = 25.U(UOPC_SZ.W) val uopBGE = 26.U(UOPC_SZ.W) val uopBGEU = 27.U(UOPC_SZ.W) val uopBLT = 28.U(UOPC_SZ.W) val uopBLTU = 29.U(UOPC_SZ.W) val uopCSRRW= 30.U(UOPC_SZ.W) val uopCSRRS= 31.U(UOPC_SZ.W) val uopCSRRC= 32.U(UOPC_SZ.W) val uopCSRRWI=33.U(UOPC_SZ.W) val uopCSRRSI=34.U(UOPC_SZ.W) val uopCSRRCI=35.U(UOPC_SZ.W) val uopJ = 36.U(UOPC_SZ.W) val uopJAL = 37.U(UOPC_SZ.W) val uopJALR = 38.U(UOPC_SZ.W) val uopAUIPC= 39.U(UOPC_SZ.W) //val uopSRET = 40.U(UOPC_SZ.W) val uopCFLSH= 41.U(UOPC_SZ.W) val uopFENCE= 42.U(UOPC_SZ.W) val uopADDIW= 43.U(UOPC_SZ.W) val uopADDW = 44.U(UOPC_SZ.W) val uopSUBW = 45.U(UOPC_SZ.W) val uopSLLIW= 46.U(UOPC_SZ.W) val uopSLLW = 47.U(UOPC_SZ.W) val uopSRAIW= 48.U(UOPC_SZ.W) val uopSRAW = 49.U(UOPC_SZ.W) val uopSRLIW= 50.U(UOPC_SZ.W) val uopSRLW = 51.U(UOPC_SZ.W) val uopMUL = 52.U(UOPC_SZ.W) val uopMULH = 53.U(UOPC_SZ.W) val uopMULHU= 54.U(UOPC_SZ.W) val uopMULHSU=55.U(UOPC_SZ.W) val uopMULW = 56.U(UOPC_SZ.W) val uopDIV = 57.U(UOPC_SZ.W) val uopDIVU = 58.U(UOPC_SZ.W) val uopREM = 59.U(UOPC_SZ.W) val uopREMU = 60.U(UOPC_SZ.W) val uopDIVW = 61.U(UOPC_SZ.W) val uopDIVUW= 62.U(UOPC_SZ.W) val uopREMW = 63.U(UOPC_SZ.W) val uopREMUW= 64.U(UOPC_SZ.W) val uopFENCEI = 65.U(UOPC_SZ.W) // = 66.U(UOPC_SZ.W) val uopAMO_AG = 67.U(UOPC_SZ.W) // AMO-address gen (use normal STD for datagen) val uopFMV_W_X = 68.U(UOPC_SZ.W) val uopFMV_D_X = 69.U(UOPC_SZ.W) val uopFMV_X_W = 70.U(UOPC_SZ.W) val uopFMV_X_D = 71.U(UOPC_SZ.W) val uopFSGNJ_S = 72.U(UOPC_SZ.W) val uopFSGNJ_D = 73.U(UOPC_SZ.W) val uopFCVT_S_D = 74.U(UOPC_SZ.W) val uopFCVT_D_S = 75.U(UOPC_SZ.W) val uopFCVT_S_X = 76.U(UOPC_SZ.W) val uopFCVT_D_X = 77.U(UOPC_SZ.W) val uopFCVT_X_S = 78.U(UOPC_SZ.W) val uopFCVT_X_D = 79.U(UOPC_SZ.W) val uopCMPR_S = 80.U(UOPC_SZ.W) val uopCMPR_D = 81.U(UOPC_SZ.W) val uopFCLASS_S = 82.U(UOPC_SZ.W) val uopFCLASS_D = 83.U(UOPC_SZ.W) val uopFMINMAX_S = 84.U(UOPC_SZ.W) val uopFMINMAX_D = 85.U(UOPC_SZ.W) // = 86.U(UOPC_SZ.W) val uopFADD_S = 87.U(UOPC_SZ.W) val uopFSUB_S = 88.U(UOPC_SZ.W) val uopFMUL_S = 89.U(UOPC_SZ.W) val uopFADD_D = 90.U(UOPC_SZ.W) val uopFSUB_D = 91.U(UOPC_SZ.W) val uopFMUL_D = 92.U(UOPC_SZ.W) val uopFMADD_S = 93.U(UOPC_SZ.W) val uopFMSUB_S = 94.U(UOPC_SZ.W) val uopFNMADD_S = 95.U(UOPC_SZ.W) val uopFNMSUB_S = 96.U(UOPC_SZ.W) val uopFMADD_D = 97.U(UOPC_SZ.W) val uopFMSUB_D = 98.U(UOPC_SZ.W) val uopFNMADD_D = 99.U(UOPC_SZ.W) val uopFNMSUB_D = 100.U(UOPC_SZ.W) val uopFDIV_S = 101.U(UOPC_SZ.W) val uopFDIV_D = 102.U(UOPC_SZ.W) val uopFSQRT_S = 103.U(UOPC_SZ.W) val uopFSQRT_D = 104.U(UOPC_SZ.W) val uopWFI = 105.U(UOPC_SZ.W) // pass uop down the CSR pipeline val uopERET = 
106.U(UOPC_SZ.W) // pass uop down the CSR pipeline, also is ERET val uopSFENCE = 107.U(UOPC_SZ.W) val uopROCC = 108.U(UOPC_SZ.W) val uopMOV = 109.U(UOPC_SZ.W) // conditional mov decoded from "add rd, x0, rs2" // The Bubble Instruction (Machine generated NOP) // Insert (XOR x0,x0,x0) which is different from software compiler // generated NOPs which are (ADDI x0, x0, 0). // Reasoning for this is to let visualizers and stat-trackers differentiate // between software NOPs and machine-generated Bubbles in the pipeline. val BUBBLE = (0x4033).U(32.W) def NullMicroOp()(implicit p: Parameters): boom.v3.common.MicroOp = { val uop = Wire(new boom.v3.common.MicroOp) uop := DontCare // Overridden in the following lines uop.uopc := uopNOP // maybe not required, but helps on asserts that try to catch spurious behavior uop.bypassable := false.B uop.fp_val := false.B uop.uses_stq := false.B uop.uses_ldq := false.B uop.pdst := 0.U uop.dst_rtype := RT_X val cs = Wire(new boom.v3.common.CtrlSignals()) cs := DontCare // Overridden in the following lines cs.br_type := BR_N cs.csr_cmd := freechips.rocketchip.rocket.CSR.N cs.is_load := false.B cs.is_sta := false.B cs.is_std := false.B uop.ctrl := cs uop } } /** * Mixin for RISCV constants */ trait RISCVConstants { // abstract out instruction decode magic numbers val RD_MSB = 11 val RD_LSB = 7 val RS1_MSB = 19 val RS1_LSB = 15 val RS2_MSB = 24 val RS2_LSB = 20 val RS3_MSB = 31 val RS3_LSB = 27 val CSR_ADDR_MSB = 31 val CSR_ADDR_LSB = 20 val CSR_ADDR_SZ = 12 // location of the fifth bit in the shamt (for checking for illegal ops for SRAIW,etc.) val SHAMT_5_BIT = 25 val LONGEST_IMM_SZ = 20 val X0 = 0.U val RA = 1.U // return address register // memory consistency model // The C/C++ atomics MCM requires that two loads to the same address maintain program order. // The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior). val MCM_ORDER_DEPENDENT_LOADS = true val jal_opc = (0x6f).U val jalr_opc = (0x67).U def GetUop(inst: UInt): UInt = inst(6,0) def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB) def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB) def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = { val rvc_exp = Module(new RVCExpander) rvc_exp.io.in := inst Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst) } // Note: Accepts only EXPANDED rvc instructions def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) ((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) ((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = { val bdecode = Module(new boom.v3.exu.BranchDecode) bdecode.io.inst := inst bdecode.io.pc := 0.U bdecode.io.out.cfi_type } } /** * Mixin for exception cause constants */ trait ExcCauseConstants { // a memory disambigious misspeculation occurred val MINI_EXCEPTION_MEM_ORDERING = 16.U val MINI_EXCEPTION_CSR_REPLAY = 17.U require (!freechips.rocketchip.rocket.Causes.all.contains(16)) require (!freechips.rocketchip.rocket.Causes.all.contains(17)) }
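Before the Verilog module below, a minimal, self-contained sketch of the immediate handling that ComputeBranchTarget in RISCVConstants relies on (BranchDecode uses it to form io.out.target). It is illustrative only: it is not one of the source files above and plays no part in the generated output, and the names BTypeImmediateSketch, field and bImm are invented for this example. It replays the Cat(Fill(20, inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) packing on a plain Scala Int so the B-type offset reconstruction can be checked by hand.

object BTypeImmediateSketch {
  // inst(hi,lo) on a plain Int, mirroring how the Chisel code slices the instruction word.
  private def field(inst: Int, hi: Int, lo: Int): Int =
    (inst >>> lo) & ((1 << (hi - lo + 1)) - 1)

  // Mirrors Cat(Fill(20, inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) from
  // ComputeBranchTarget: rebuild the scattered B-type immediate bits and sign-extend
  // from bit 12 (the caller then adds this offset to the PC and forces it even).
  def bImm(inst: Int): Int = {
    val imm13 = (field(inst, 31, 31) << 12) | (field(inst, 7, 7) << 11) |
                (field(inst, 30, 25) << 5)  | (field(inst, 11, 8) << 1)
    (imm13 << 19) >> 19 // sign-extend the 13-bit immediate within a 32-bit Int
  }

  // BEQ x0, x0, +16 encodes as 0x00000863; the recovered offset is +16.
  assert(bImm(0x00000863) == 16)
}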
module BranchDecode_19( // @[decode.scala:623:7] input clock, // @[decode.scala:623:7] input reset, // @[decode.scala:623:7] input [31:0] io_inst, // @[decode.scala:625:14] input [39:0] io_pc, // @[decode.scala:625:14] output io_out_is_ret, // @[decode.scala:625:14] output io_out_is_call, // @[decode.scala:625:14] output [39:0] io_out_target, // @[decode.scala:625:14] output [2:0] io_out_cfi_type, // @[decode.scala:625:14] output io_out_sfb_offset_valid, // @[decode.scala:625:14] output [5:0] io_out_sfb_offset_bits, // @[decode.scala:625:14] output io_out_shadowable // @[decode.scala:625:14] ); wire [31:0] io_inst_0 = io_inst; // @[decode.scala:623:7] wire [39:0] io_pc_0 = io_pc; // @[decode.scala:623:7] wire [31:0] bpd_csignals_decoded_plaInput = io_inst_0; // @[pla.scala:77:22] wire _io_out_is_ret_T_6; // @[decode.scala:695:72] wire [39:0] _io_out_target_T = io_pc_0; // @[decode.scala:623:7] wire [39:0] _io_out_target_T_8 = io_pc_0; // @[decode.scala:623:7] wire _io_out_is_call_T_3; // @[decode.scala:694:47] wire [39:0] _io_out_target_T_16; // @[decode.scala:697:23] wire [2:0] _io_out_cfi_type_T_2; // @[decode.scala:700:8] wire _io_out_sfb_offset_valid_T_7; // @[decode.scala:710:76] wire _io_out_shadowable_T_11; // @[decode.scala:712:41] wire io_out_sfb_offset_valid_0; // @[decode.scala:623:7] wire [5:0] io_out_sfb_offset_bits_0; // @[decode.scala:623:7] wire io_out_is_ret_0; // @[decode.scala:623:7] wire io_out_is_call_0; // @[decode.scala:623:7] wire [39:0] io_out_target_0; // @[decode.scala:623:7] wire [2:0] io_out_cfi_type_0; // @[decode.scala:623:7] wire io_out_shadowable_0; // @[decode.scala:623:7] wire [31:0] bpd_csignals_decoded_invInputs = ~bpd_csignals_decoded_plaInput; // @[pla.scala:77:22, :78:21] wire [4:0] bpd_csignals_decoded_invMatrixOutputs; // @[pla.scala:120:37] wire [4:0] bpd_csignals_decoded; // @[pla.scala:81:23] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_1 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_2 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_3 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_4 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_5 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_6 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_7 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_8 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_9 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_10 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_11 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_12 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_13 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_14 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_15 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_1 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_2 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_3 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_4 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_5 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_6 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_7 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_8 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_9 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_10 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_11 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_12 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_13 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_14 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_15 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_1 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_2 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_3 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_4 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_6 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_9 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_10 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_11 = 
bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_12 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_13 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_14 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_15 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_3 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_5 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_6 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_7 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_9 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_11 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_12 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_13 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_1 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_2 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_3 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_4 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_5 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_9 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_10 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_11 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_13 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_14 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_15 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_1 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_9 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_11 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_13 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_1 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_2 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_3 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_4 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_5 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_9 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_10 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_11 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_13 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_14 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_15 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7 = bpd_csignals_decoded_invInputs[12]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_1 = bpd_csignals_decoded_invInputs[12]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_2 = bpd_csignals_decoded_invInputs[12]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_6 = bpd_csignals_decoded_invInputs[12]; // @[pla.scala:78:21, :91:29] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo = {bpd_csignals_decoded_andMatrixOutputs_lo_hi, bpd_csignals_decoded_andMatrixOutputs_lo_lo}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi = {bpd_csignals_decoded_andMatrixOutputs_hi_hi, 
bpd_csignals_decoded_andMatrixOutputs_hi_lo}; // @[pla.scala:98:53] wire [7:0] _bpd_csignals_decoded_andMatrixOutputs_T = {bpd_csignals_decoded_andMatrixOutputs_hi, bpd_csignals_decoded_andMatrixOutputs_lo}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_5_2 = &_bpd_csignals_decoded_andMatrixOutputs_T; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_1 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_2 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_4 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_5 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_4 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_5 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_8 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_7 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_12 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_13 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8 = bpd_csignals_decoded_invInputs[14]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_1 = bpd_csignals_decoded_invInputs[14]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_4 = bpd_csignals_decoded_invInputs[14]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_3 = bpd_csignals_decoded_invInputs[14]; // @[pla.scala:78:21, :91:29] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_1}; // @[pla.scala:91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_1, bpd_csignals_decoded_andMatrixOutputs_lo_lo_1}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_1}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_1}; // @[pla.scala:90:45, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_1}; // @[pla.scala:91:29, :98:53] wire [4:0] bpd_csignals_decoded_andMatrixOutputs_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_1, bpd_csignals_decoded_andMatrixOutputs_hi_lo_1}; // @[pla.scala:98:53] wire 
[8:0] _bpd_csignals_decoded_andMatrixOutputs_T_1 = {bpd_csignals_decoded_andMatrixOutputs_hi_1, bpd_csignals_decoded_andMatrixOutputs_lo_1}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_9_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_1; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_2 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_3 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_4 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_5 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_6 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_7 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_8 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_12 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_15 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_3 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_3 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_6 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_7 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_8 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_2 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_2 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_4 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_5 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_5 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_6 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_7 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_1 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_2 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_3 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_4 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_5 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_6 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_7 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_1 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_2 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_3 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_4 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_5 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_6 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_7 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_1 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_2 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_3 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_4 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_5 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_6 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_7 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_1 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_1 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_2 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_3 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_4 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_5 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_6 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9}; // @[pla.scala:91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_2, bpd_csignals_decoded_andMatrixOutputs_lo_lo_2}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_2}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_2}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_2}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_2}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_1, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_2, bpd_csignals_decoded_andMatrixOutputs_hi_lo_2}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_2 = {bpd_csignals_decoded_andMatrixOutputs_hi_2, bpd_csignals_decoded_andMatrixOutputs_lo_2}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_14_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_2; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_1 = bpd_csignals_decoded_invInputs[30]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_2 = bpd_csignals_decoded_invInputs[30]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_3 
= bpd_csignals_decoded_invInputs[30]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_4 = bpd_csignals_decoded_invInputs[30]; // @[pla.scala:78:21, :91:29] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_1}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_1}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_1}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_2}; // @[pla.scala:91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_1, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_1}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_3, bpd_csignals_decoded_andMatrixOutputs_lo_lo_3}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_3}; // @[pla.scala:90:45, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_3}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_3}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_3}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_2, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_1}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_3, bpd_csignals_decoded_andMatrixOutputs_hi_lo_3}; // @[pla.scala:98:53] wire [13:0] _bpd_csignals_decoded_andMatrixOutputs_T_3 = {bpd_csignals_decoded_andMatrixOutputs_hi_3, bpd_csignals_decoded_andMatrixOutputs_lo_3}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_0_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_3; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_2}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_1}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_2}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_2 = 
{bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_2}; // @[pla.scala:91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_2, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_2}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_4, bpd_csignals_decoded_andMatrixOutputs_lo_lo_4}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_4}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_4}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_2, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_1}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_4}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_4}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_3, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_2}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_4, bpd_csignals_decoded_andMatrixOutputs_hi_lo_4}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_4 = {bpd_csignals_decoded_andMatrixOutputs_hi_4, bpd_csignals_decoded_andMatrixOutputs_lo_4}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_2_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_4; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_5 = bpd_csignals_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_7 = bpd_csignals_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_8 = bpd_csignals_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_5}; // @[pla.scala:90:45, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_5}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_5}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_5}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_5, bpd_csignals_decoded_andMatrixOutputs_hi_lo_5}; // 
@[pla.scala:98:53] wire [6:0] _bpd_csignals_decoded_andMatrixOutputs_T_5 = {bpd_csignals_decoded_andMatrixOutputs_hi_5, bpd_csignals_decoded_andMatrixOutputs_lo_5}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_12_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_5; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_6 = bpd_csignals_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_7 = bpd_csignals_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_8 = bpd_csignals_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_12 = bpd_csignals_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_6 = bpd_csignals_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_7 = bpd_csignals_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_8 = bpd_csignals_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_12 = bpd_csignals_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_5}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_6}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_6, bpd_csignals_decoded_andMatrixOutputs_lo_lo_5}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_6}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_6}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_6, bpd_csignals_decoded_andMatrixOutputs_hi_lo_6}; // @[pla.scala:98:53] wire [7:0] _bpd_csignals_decoded_andMatrixOutputs_T_6 = {bpd_csignals_decoded_andMatrixOutputs_hi_6, bpd_csignals_decoded_andMatrixOutputs_lo_6}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_6_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_6; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_3}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_7}; // @[pla.scala:90:45, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_6}; // @[pla.scala:91:29, :98:53] wire [4:0] bpd_csignals_decoded_andMatrixOutputs_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_7, 
bpd_csignals_decoded_andMatrixOutputs_lo_lo_6}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_7}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_7}; // @[pla.scala:90:45, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_7}; // @[pla.scala:90:45, :98:53] wire [4:0] bpd_csignals_decoded_andMatrixOutputs_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_7, bpd_csignals_decoded_andMatrixOutputs_hi_lo_7}; // @[pla.scala:98:53] wire [9:0] _bpd_csignals_decoded_andMatrixOutputs_T_7 = {bpd_csignals_decoded_andMatrixOutputs_hi_7, bpd_csignals_decoded_andMatrixOutputs_lo_7}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_15_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_7; // @[pla.scala:98:{53,70}] wire _bpd_csignals_decoded_orMatrixOutputs_T_4 = bpd_csignals_decoded_andMatrixOutputs_15_2; // @[pla.scala:98:70, :114:36] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_8 = bpd_csignals_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_10 = bpd_csignals_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_14 = bpd_csignals_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_8}; // @[pla.scala:90:45, :91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_8 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_8}; // @[pla.scala:90:45, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_8}; // @[pla.scala:90:45, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_8}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_8, bpd_csignals_decoded_andMatrixOutputs_hi_lo_8}; // @[pla.scala:98:53] wire [6:0] _bpd_csignals_decoded_andMatrixOutputs_T_8 = {bpd_csignals_decoded_andMatrixOutputs_hi_8, bpd_csignals_decoded_andMatrixOutputs_lo_8}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_11_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_8; // @[pla.scala:98:{53,70}] wire _bpd_csignals_decoded_orMatrixOutputs_T_5 = bpd_csignals_decoded_andMatrixOutputs_11_2; // @[pla.scala:98:70, :114:36] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_7 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_10 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_11 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_14 = 
bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_15 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_3}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_2}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_3}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_4}; // @[pla.scala:91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_9 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_4, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_3}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_9 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_9, bpd_csignals_decoded_andMatrixOutputs_lo_lo_7}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_7}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_9}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_9 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_3, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_2}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_9}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_9}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_9 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_5, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_3}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_9 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_9, bpd_csignals_decoded_andMatrixOutputs_hi_lo_9}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_9 = {bpd_csignals_decoded_andMatrixOutputs_hi_9, bpd_csignals_decoded_andMatrixOutputs_lo_9}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_3_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_9; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_4}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_8 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_3}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_4 = 
{bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_4}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_5}; // @[pla.scala:91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_10 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_5, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_4}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_10 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_10, bpd_csignals_decoded_andMatrixOutputs_lo_lo_8}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_8}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_10}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_10 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_4, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_3}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_10}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_10}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_10 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_6, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_4}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_10 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_10, bpd_csignals_decoded_andMatrixOutputs_hi_lo_10}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_10 = {bpd_csignals_decoded_andMatrixOutputs_hi_10, bpd_csignals_decoded_andMatrixOutputs_lo_10}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_7_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_10; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_9 = bpd_csignals_decoded_plaInput[13]; // @[pla.scala:77:22, :90:45] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_9 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_11, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_9}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_11 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_11, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_11}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_11 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_11, bpd_csignals_decoded_andMatrixOutputs_lo_lo_9}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_11 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_11, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_11}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_11 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_11, 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_11}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_11 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_11, bpd_csignals_decoded_andMatrixOutputs_hi_lo_11}; // @[pla.scala:98:53] wire [7:0] _bpd_csignals_decoded_andMatrixOutputs_T_11 = {bpd_csignals_decoded_andMatrixOutputs_hi_11, bpd_csignals_decoded_andMatrixOutputs_lo_11}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_1_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_11; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_10 = bpd_csignals_decoded_plaInput[14]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_6 = bpd_csignals_decoded_plaInput[14]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_8 = bpd_csignals_decoded_plaInput[14]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_9 = bpd_csignals_decoded_plaInput[14]; // @[pla.scala:77:22, :90:45] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_10 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_10}; // @[pla.scala:90:45, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_12 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_12}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_12 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_12, bpd_csignals_decoded_andMatrixOutputs_lo_lo_10}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_12 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_12}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_12 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_12}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_12 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_12, bpd_csignals_decoded_andMatrixOutputs_hi_lo_12}; // @[pla.scala:98:53] wire [7:0] _bpd_csignals_decoded_andMatrixOutputs_T_12 = {bpd_csignals_decoded_andMatrixOutputs_hi_12, bpd_csignals_decoded_andMatrixOutputs_lo_12}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_13_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_12; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_5}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_11 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_4}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_5}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_6}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_13 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_6, 
bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_5}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_13 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_13, bpd_csignals_decoded_andMatrixOutputs_lo_lo_11}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_13, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_11}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_13, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_13}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_13 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_5, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_4}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_13, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_13}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_13, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_13}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_13 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_7, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_5}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_13 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_13, bpd_csignals_decoded_andMatrixOutputs_hi_lo_13}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_13 = {bpd_csignals_decoded_andMatrixOutputs_hi_13, bpd_csignals_decoded_andMatrixOutputs_lo_13}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_4_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_13; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_6}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_12 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_5}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_6}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_7}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_14 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_7, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_6}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_14 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_14, bpd_csignals_decoded_andMatrixOutputs_lo_lo_12}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_14, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_12}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_14, 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_14}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_14 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_6, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_5}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_14, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_14}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_14, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_14}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_14 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_8, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_6}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_14 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_14, bpd_csignals_decoded_andMatrixOutputs_hi_lo_14}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_14 = {bpd_csignals_decoded_andMatrixOutputs_hi_14, bpd_csignals_decoded_andMatrixOutputs_lo_14}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_8_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_14; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_7}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_13 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_6}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_7}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_8}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_15 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_8, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_7}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_15 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_15, bpd_csignals_decoded_andMatrixOutputs_lo_lo_13}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_15, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_13}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_15, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_15}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_15 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_7, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_6}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_15, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_15}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_9 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_15, 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_15}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_15 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_9, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_7}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_15 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_15, bpd_csignals_decoded_andMatrixOutputs_hi_lo_15}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_15 = {bpd_csignals_decoded_andMatrixOutputs_hi_15, bpd_csignals_decoded_andMatrixOutputs_lo_15}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_10_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_15; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_lo = {bpd_csignals_decoded_andMatrixOutputs_2_2, bpd_csignals_decoded_andMatrixOutputs_10_2}; // @[pla.scala:98:70, :114:19] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_hi = {bpd_csignals_decoded_andMatrixOutputs_14_2, bpd_csignals_decoded_andMatrixOutputs_0_2}; // @[pla.scala:98:70, :114:19] wire [3:0] _bpd_csignals_decoded_orMatrixOutputs_T = {bpd_csignals_decoded_orMatrixOutputs_hi, bpd_csignals_decoded_orMatrixOutputs_lo}; // @[pla.scala:114:19] wire _bpd_csignals_decoded_orMatrixOutputs_T_1 = |_bpd_csignals_decoded_orMatrixOutputs_T; // @[pla.scala:114:{19,36}] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_lo_lo = {bpd_csignals_decoded_andMatrixOutputs_8_2, bpd_csignals_decoded_andMatrixOutputs_10_2}; // @[pla.scala:98:70, :114:19] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_lo_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_7_2, bpd_csignals_decoded_andMatrixOutputs_1_2}; // @[pla.scala:98:70, :114:19] wire [2:0] bpd_csignals_decoded_orMatrixOutputs_lo_hi = {bpd_csignals_decoded_orMatrixOutputs_lo_hi_hi, bpd_csignals_decoded_andMatrixOutputs_4_2}; // @[pla.scala:98:70, :114:19] wire [4:0] bpd_csignals_decoded_orMatrixOutputs_lo_1 = {bpd_csignals_decoded_orMatrixOutputs_lo_hi, bpd_csignals_decoded_orMatrixOutputs_lo_lo}; // @[pla.scala:114:19] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_hi_lo_hi = {bpd_csignals_decoded_andMatrixOutputs_0_2, bpd_csignals_decoded_andMatrixOutputs_12_2}; // @[pla.scala:98:70, :114:19] wire [2:0] bpd_csignals_decoded_orMatrixOutputs_hi_lo = {bpd_csignals_decoded_orMatrixOutputs_hi_lo_hi, bpd_csignals_decoded_andMatrixOutputs_3_2}; // @[pla.scala:98:70, :114:19] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_hi_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_5_2, bpd_csignals_decoded_andMatrixOutputs_9_2}; // @[pla.scala:98:70, :114:19] wire [2:0] bpd_csignals_decoded_orMatrixOutputs_hi_hi = {bpd_csignals_decoded_orMatrixOutputs_hi_hi_hi, bpd_csignals_decoded_andMatrixOutputs_14_2}; // @[pla.scala:98:70, :114:19] wire [5:0] bpd_csignals_decoded_orMatrixOutputs_hi_1 = {bpd_csignals_decoded_orMatrixOutputs_hi_hi, bpd_csignals_decoded_orMatrixOutputs_hi_lo}; // @[pla.scala:114:19] wire [10:0] _bpd_csignals_decoded_orMatrixOutputs_T_2 = {bpd_csignals_decoded_orMatrixOutputs_hi_1, bpd_csignals_decoded_orMatrixOutputs_lo_1}; // @[pla.scala:114:19] wire _bpd_csignals_decoded_orMatrixOutputs_T_3 = |_bpd_csignals_decoded_orMatrixOutputs_T_2; // @[pla.scala:114:{19,36}] wire [1:0] _bpd_csignals_decoded_orMatrixOutputs_T_6 = {bpd_csignals_decoded_andMatrixOutputs_6_2, bpd_csignals_decoded_andMatrixOutputs_13_2}; // @[pla.scala:98:70, :114:19] wire _bpd_csignals_decoded_orMatrixOutputs_T_7 = |_bpd_csignals_decoded_orMatrixOutputs_T_6; // @[pla.scala:114:{19,36}] wire [1:0] 
bpd_csignals_decoded_orMatrixOutputs_lo_2 = {_bpd_csignals_decoded_orMatrixOutputs_T_3, _bpd_csignals_decoded_orMatrixOutputs_T_1}; // @[pla.scala:102:36, :114:36] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_hi_hi_1 = {_bpd_csignals_decoded_orMatrixOutputs_T_7, _bpd_csignals_decoded_orMatrixOutputs_T_5}; // @[pla.scala:102:36, :114:36] wire [2:0] bpd_csignals_decoded_orMatrixOutputs_hi_2 = {bpd_csignals_decoded_orMatrixOutputs_hi_hi_1, _bpd_csignals_decoded_orMatrixOutputs_T_4}; // @[pla.scala:102:36, :114:36] wire [4:0] bpd_csignals_decoded_orMatrixOutputs = {bpd_csignals_decoded_orMatrixOutputs_hi_2, bpd_csignals_decoded_orMatrixOutputs_lo_2}; // @[pla.scala:102:36] wire _bpd_csignals_decoded_invMatrixOutputs_T = bpd_csignals_decoded_orMatrixOutputs[0]; // @[pla.scala:102:36, :124:31] wire _bpd_csignals_decoded_invMatrixOutputs_T_1 = bpd_csignals_decoded_orMatrixOutputs[1]; // @[pla.scala:102:36, :124:31] wire _bpd_csignals_decoded_invMatrixOutputs_T_2 = bpd_csignals_decoded_orMatrixOutputs[2]; // @[pla.scala:102:36, :124:31] wire _bpd_csignals_decoded_invMatrixOutputs_T_3 = bpd_csignals_decoded_orMatrixOutputs[3]; // @[pla.scala:102:36, :124:31] wire _bpd_csignals_decoded_invMatrixOutputs_T_4 = bpd_csignals_decoded_orMatrixOutputs[4]; // @[pla.scala:102:36, :124:31] wire [1:0] bpd_csignals_decoded_invMatrixOutputs_lo = {_bpd_csignals_decoded_invMatrixOutputs_T_1, _bpd_csignals_decoded_invMatrixOutputs_T}; // @[pla.scala:120:37, :124:31] wire [1:0] bpd_csignals_decoded_invMatrixOutputs_hi_hi = {_bpd_csignals_decoded_invMatrixOutputs_T_4, _bpd_csignals_decoded_invMatrixOutputs_T_3}; // @[pla.scala:120:37, :124:31] wire [2:0] bpd_csignals_decoded_invMatrixOutputs_hi = {bpd_csignals_decoded_invMatrixOutputs_hi_hi, _bpd_csignals_decoded_invMatrixOutputs_T_2}; // @[pla.scala:120:37, :124:31] assign bpd_csignals_decoded_invMatrixOutputs = {bpd_csignals_decoded_invMatrixOutputs_hi, bpd_csignals_decoded_invMatrixOutputs_lo}; // @[pla.scala:120:37] assign bpd_csignals_decoded = bpd_csignals_decoded_invMatrixOutputs; // @[pla.scala:81:23, :120:37] wire bpd_csignals_0 = bpd_csignals_decoded[4]; // @[pla.scala:81:23] wire cs_is_br = bpd_csignals_0; // @[Decode.scala:50:77] wire bpd_csignals_1 = bpd_csignals_decoded[3]; // @[pla.scala:81:23] wire cs_is_jal = bpd_csignals_1; // @[Decode.scala:50:77] wire bpd_csignals_2 = bpd_csignals_decoded[2]; // @[pla.scala:81:23] wire cs_is_jalr = bpd_csignals_2; // @[Decode.scala:50:77] wire bpd_csignals_3 = bpd_csignals_decoded[1]; // @[pla.scala:81:23] wire cs_is_shadowable = bpd_csignals_3; // @[Decode.scala:50:77] wire bpd_csignals_4 = bpd_csignals_decoded[0]; // @[pla.scala:81:23] wire cs_has_rs2 = bpd_csignals_4; // @[Decode.scala:50:77] wire _io_out_is_call_T = cs_is_jal | cs_is_jalr; // @[decode.scala:689:34, :690:35, :694:32] wire [4:0] _io_out_is_call_T_1 = io_inst_0[11:7]; // @[decode.scala:623:7] wire [4:0] _io_out_is_ret_T_4 = io_inst_0[11:7]; // @[decode.scala:623:7] wire [4:0] _io_out_shadowable_T_2 = io_inst_0[11:7]; // @[decode.scala:623:7] wire _io_out_is_call_T_2 = _io_out_is_call_T_1 == 5'h1; // @[decode.scala:694:65] assign _io_out_is_call_T_3 = _io_out_is_call_T & _io_out_is_call_T_2; // @[decode.scala:694:{32,47,65}] assign io_out_is_call_0 = _io_out_is_call_T_3; // @[decode.scala:623:7, :694:47] wire [4:0] _io_out_is_ret_T = io_inst_0[19:15]; // @[decode.scala:623:7] wire [4:0] _io_out_shadowable_T_1 = io_inst_0[19:15]; // @[decode.scala:623:7] wire [4:0] _io_out_shadowable_T_7 = io_inst_0[19:15]; // @[decode.scala:623:7] wire [4:0] 
_io_out_is_ret_T_1 = _io_out_is_ret_T & 5'h1B; // @[decode.scala:695:51] wire _io_out_is_ret_T_2 = _io_out_is_ret_T_1 == 5'h1; // @[decode.scala:695:51] wire _io_out_is_ret_T_3 = cs_is_jalr & _io_out_is_ret_T_2; // @[decode.scala:690:35, :695:{32,51}] wire _io_out_is_ret_T_5 = _io_out_is_ret_T_4 == 5'h0; // @[decode.scala:695:90] assign _io_out_is_ret_T_6 = _io_out_is_ret_T_3 & _io_out_is_ret_T_5; // @[decode.scala:695:{32,72,90}] assign io_out_is_ret_0 = _io_out_is_ret_T_6; // @[decode.scala:623:7, :695:72] wire _io_out_target_b_imm32_T = io_inst_0[31]; // @[decode.scala:623:7] wire _io_out_target_j_imm32_T = io_inst_0[31]; // @[decode.scala:623:7] wire _io_out_sfb_offset_valid_T = io_inst_0[31]; // @[decode.scala:623:7, :710:50] wire [19:0] _io_out_target_b_imm32_T_1 = {20{_io_out_target_b_imm32_T}}; // @[consts.scala:337:{27,35}] wire _io_out_target_b_imm32_T_2 = io_inst_0[7]; // @[decode.scala:623:7] wire _br_offset_T = io_inst_0[7]; // @[decode.scala:623:7, :708:30] wire [5:0] _io_out_target_b_imm32_T_3 = io_inst_0[30:25]; // @[decode.scala:623:7] wire [5:0] _io_out_target_j_imm32_T_4 = io_inst_0[30:25]; // @[decode.scala:623:7] wire [5:0] _br_offset_T_1 = io_inst_0[30:25]; // @[decode.scala:623:7, :708:42] wire [3:0] _io_out_target_b_imm32_T_4 = io_inst_0[11:8]; // @[decode.scala:623:7] wire [3:0] _br_offset_T_2 = io_inst_0[11:8]; // @[decode.scala:623:7, :708:58] wire [4:0] io_out_target_b_imm32_lo = {_io_out_target_b_imm32_T_4, 1'h0}; // @[consts.scala:337:{22,68}] wire [20:0] io_out_target_b_imm32_hi_hi = {_io_out_target_b_imm32_T_1, _io_out_target_b_imm32_T_2}; // @[consts.scala:337:{22,27,46}] wire [26:0] io_out_target_b_imm32_hi = {io_out_target_b_imm32_hi_hi, _io_out_target_b_imm32_T_3}; // @[consts.scala:337:{22,55}] wire [31:0] io_out_target_b_imm32 = {io_out_target_b_imm32_hi, io_out_target_b_imm32_lo}; // @[consts.scala:337:22] wire [31:0] _io_out_target_T_1 = io_out_target_b_imm32; // @[consts.scala:337:22, :338:27] wire [40:0] _io_out_target_T_2 = {_io_out_target_T[39], _io_out_target_T} + {{9{_io_out_target_T_1[31]}}, _io_out_target_T_1}; // @[consts.scala:338:{10,17,27}] wire [39:0] _io_out_target_T_3 = _io_out_target_T_2[39:0]; // @[consts.scala:338:17] wire [39:0] _io_out_target_T_4 = _io_out_target_T_3; // @[consts.scala:338:17] wire [39:0] _io_out_target_T_5 = _io_out_target_T_4 & 40'hFFFFFFFFFE; // @[consts.scala:338:{17,42}] wire [39:0] _io_out_target_T_6 = _io_out_target_T_5; // @[consts.scala:338:42] wire [39:0] _io_out_target_T_7 = _io_out_target_T_6; // @[consts.scala:338:{42,52}] wire [11:0] _io_out_target_j_imm32_T_1 = {12{_io_out_target_j_imm32_T}}; // @[consts.scala:343:{27,35}] wire [7:0] _io_out_target_j_imm32_T_2 = io_inst_0[19:12]; // @[decode.scala:623:7] wire _io_out_target_j_imm32_T_3 = io_inst_0[20]; // @[decode.scala:623:7] wire [3:0] _io_out_target_j_imm32_T_5 = io_inst_0[24:21]; // @[decode.scala:623:7] wire [9:0] io_out_target_j_imm32_lo_hi = {_io_out_target_j_imm32_T_4, _io_out_target_j_imm32_T_5}; // @[consts.scala:343:{22,69,82}] wire [10:0] io_out_target_j_imm32_lo = {io_out_target_j_imm32_lo_hi, 1'h0}; // @[consts.scala:343:22] wire [19:0] io_out_target_j_imm32_hi_hi = {_io_out_target_j_imm32_T_1, _io_out_target_j_imm32_T_2}; // @[consts.scala:343:{22,27,46}] wire [20:0] io_out_target_j_imm32_hi = {io_out_target_j_imm32_hi_hi, _io_out_target_j_imm32_T_3}; // @[consts.scala:343:{22,59}] wire [31:0] io_out_target_j_imm32 = {io_out_target_j_imm32_hi, io_out_target_j_imm32_lo}; // @[consts.scala:343:22] wire [31:0] _io_out_target_T_9 = 
io_out_target_j_imm32; // @[consts.scala:343:22, :344:27] wire [40:0] _io_out_target_T_10 = {_io_out_target_T_8[39], _io_out_target_T_8} + {{9{_io_out_target_T_9[31]}}, _io_out_target_T_9}; // @[consts.scala:344:{10,17,27}] wire [39:0] _io_out_target_T_11 = _io_out_target_T_10[39:0]; // @[consts.scala:344:17] wire [39:0] _io_out_target_T_12 = _io_out_target_T_11; // @[consts.scala:344:17] wire [39:0] _io_out_target_T_13 = _io_out_target_T_12 & 40'hFFFFFFFFFE; // @[consts.scala:344:{17,42}] wire [39:0] _io_out_target_T_14 = _io_out_target_T_13; // @[consts.scala:344:42] wire [39:0] _io_out_target_T_15 = _io_out_target_T_14; // @[consts.scala:344:{42,52}] assign _io_out_target_T_16 = cs_is_br ? _io_out_target_T_7 : _io_out_target_T_15; // @[decode.scala:688:33, :697:23] assign io_out_target_0 = _io_out_target_T_16; // @[decode.scala:623:7, :697:23] wire [2:0] _io_out_cfi_type_T = {2'h0, cs_is_br}; // @[decode.scala:688:33, :704:8] wire [2:0] _io_out_cfi_type_T_1 = cs_is_jal ? 3'h2 : _io_out_cfi_type_T; // @[decode.scala:689:34, :702:8, :704:8] assign _io_out_cfi_type_T_2 = cs_is_jalr ? 3'h3 : _io_out_cfi_type_T_1; // @[decode.scala:690:35, :700:8, :702:8] assign io_out_cfi_type_0 = _io_out_cfi_type_T_2; // @[decode.scala:623:7, :700:8] wire [4:0] br_offset_lo = {_br_offset_T_2, 1'h0}; // @[decode.scala:708:{22,58}] wire [6:0] br_offset_hi = {_br_offset_T, _br_offset_T_1}; // @[decode.scala:708:{22,30,42}] wire [11:0] br_offset = {br_offset_hi, br_offset_lo}; // @[decode.scala:708:22] wire _io_out_sfb_offset_valid_T_1 = ~_io_out_sfb_offset_valid_T; // @[decode.scala:710:{42,50}] wire _io_out_sfb_offset_valid_T_2 = cs_is_br & _io_out_sfb_offset_valid_T_1; // @[decode.scala:688:33, :710:{39,42}] wire _io_out_sfb_offset_valid_T_3 = |br_offset; // @[decode.scala:708:22, :710:68] wire _io_out_sfb_offset_valid_T_4 = _io_out_sfb_offset_valid_T_2 & _io_out_sfb_offset_valid_T_3; // @[decode.scala:710:{39,55,68}] wire [5:0] _io_out_sfb_offset_valid_T_5 = br_offset[11:6]; // @[decode.scala:708:22, :710:90] wire _io_out_sfb_offset_valid_T_6 = _io_out_sfb_offset_valid_T_5 == 6'h0; // @[decode.scala:710:{90,117}] assign _io_out_sfb_offset_valid_T_7 = _io_out_sfb_offset_valid_T_4 & _io_out_sfb_offset_valid_T_6; // @[decode.scala:710:{55,76,117}] assign io_out_sfb_offset_valid_0 = _io_out_sfb_offset_valid_T_7; // @[decode.scala:623:7, :710:76] assign io_out_sfb_offset_bits_0 = br_offset[5:0]; // @[decode.scala:623:7, :708:22, :711:27] wire _io_out_shadowable_T = ~cs_has_rs2; // @[decode.scala:692:35, :713:5] wire _io_out_shadowable_T_3 = _io_out_shadowable_T_1 == _io_out_shadowable_T_2; // @[decode.scala:714:22] wire _io_out_shadowable_T_4 = _io_out_shadowable_T | _io_out_shadowable_T_3; // @[decode.scala:713:{5,17}, :714:22] wire [31:0] _io_out_shadowable_T_5 = io_inst_0 & 32'hFE00707F; // @[decode.scala:623:7, :715:14] wire _io_out_shadowable_T_6 = _io_out_shadowable_T_5 == 32'h33; // @[decode.scala:715:14] wire _io_out_shadowable_T_8 = _io_out_shadowable_T_7 == 5'h0; // @[decode.scala:695:90, :715:41] wire _io_out_shadowable_T_9 = _io_out_shadowable_T_6 & _io_out_shadowable_T_8; // @[decode.scala:715:{14,22,41}] wire _io_out_shadowable_T_10 = _io_out_shadowable_T_4 | _io_out_shadowable_T_9; // @[decode.scala:713:17, :714:42, :715:22] assign _io_out_shadowable_T_11 = cs_is_shadowable & _io_out_shadowable_T_10; // @[decode.scala:691:41, :712:41, :714:42] assign io_out_shadowable_0 = _io_out_shadowable_T_11; // @[decode.scala:623:7, :712:41] assign io_out_is_ret = io_out_is_ret_0; // @[decode.scala:623:7] 
assign io_out_is_call = io_out_is_call_0; // @[decode.scala:623:7] assign io_out_target = io_out_target_0; // @[decode.scala:623:7] assign io_out_cfi_type = io_out_cfi_type_0; // @[decode.scala:623:7] assign io_out_sfb_offset_valid = io_out_sfb_offset_valid_0; // @[decode.scala:623:7] assign io_out_sfb_offset_bits = io_out_sfb_offset_bits_0; // @[decode.scala:623:7] assign io_out_shadowable = io_out_shadowable_0; // @[decode.scala:623:7] endmodule
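The final assigns above reduce the decoded control signals into the call/return/CFI-type outputs. As a reading aid, the following is a minimal Chisel sketch of what those generated compares encode (the == 5'h1 check on rd, the & 5'h1B mask on rs1, and the nested cfi_type muxes); the module and port names are assumptions chosen for illustration, not the original source.

import chisel3._

// Sketch only: reconstructs the intent of the generated compares above under assumed IO names.
class BranchKindSketch extends Module {
  val io = IO(new Bundle {
    val inst     = Input(UInt(32.W))
    val is_br    = Input(Bool())
    val is_jal   = Input(Bool())
    val is_jalr  = Input(Bool())
    val is_call  = Output(Bool())
    val is_ret   = Output(Bool())
    val cfi_type = Output(UInt(3.W))
  })
  val rd  = io.inst(11, 7)    // inst[11:7]
  val rs1 = io.inst(19, 15)   // inst[19:15]
  // call: a JAL/JALR that links into x1, matching the rd == 5'h1 compare
  io.is_call := (io.is_jal || io.is_jalr) && rd === 1.U
  // ret: a JALR through a link register with rd == x0; the 5'h1B mask compared against 1
  // accepts rs1 == x1 (00001) or x5 (00101), the two RISC-V link registers
  io.is_ret  := io.is_jalr && (rs1 & "b11011".U) === 1.U && rd === 0.U
  // priority order of the generated muxes: jalr -> 3, jal -> 2, br -> 1, otherwise 0
  io.cfi_type := Mux(io.is_jalr, 3.U, Mux(io.is_jal, 2.U, Mux(io.is_br, 1.U, 0.U)))
}

The 5'h1B mask keeps bits 4, 3, 1 and 0 of rs1 and requires the masked value to equal 1, which is exactly the "rs1 is a link register" condition; this is why both x1 and x5 returns are recognized by the generated logic.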
Generate the Verilog code corresponding to the following Chisel files. File register-read.scala: //****************************************************************************** // Copyright (c) 2012 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Register Read //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.exu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import boom.v3.common._ import boom.v3.util._ /** * Handle the register read and bypass network for the OoO backend * interfaces with the issue window on the enqueue side, and the execution * pipelines on the dequeue side. * * @param issueWidth total issue width from all issue queues * @param supportedUnitsArray seq of SupportedFuncUnits classes indicating what the functional units do * @param numTotalReadPorts number of read ports * @param numReadPortsArray execution units read port sequence * @param numTotalBypassPorts number of bypass ports out of the execution units * @param registerWidth size of register in bits */ class RegisterRead( issueWidth: Int, supportedUnitsArray: Seq[SupportedFuncUnits], numTotalReadPorts: Int, numReadPortsArray: Seq[Int], // each exe_unit must tell us how many max // operands it can accept (the sum should equal // numTotalReadPorts) numTotalBypassPorts: Int, numTotalPredBypassPorts: Int, registerWidth: Int )(implicit p: Parameters) extends BoomModule { val io = IO(new Bundle { // issued micro-ops val iss_valids = Input(Vec(issueWidth, Bool())) val iss_uops = Input(Vec(issueWidth, new MicroOp())) // interface with register file's read ports val rf_read_ports = Flipped(Vec(numTotalReadPorts, new RegisterFileReadPortIO(maxPregSz, registerWidth))) val prf_read_ports = Flipped(Vec(issueWidth, new RegisterFileReadPortIO(log2Ceil(ftqSz), 1))) val bypass = Input(Vec(numTotalBypassPorts, Valid(new ExeUnitResp(registerWidth)))) val pred_bypass = Input(Vec(numTotalPredBypassPorts, Valid(new ExeUnitResp(1)))) // send micro-ops to the execution pipelines val exe_reqs = Vec(issueWidth, (new DecoupledIO(new FuncUnitReq(registerWidth)))) val kill = Input(Bool()) val brupdate = Input(new BrUpdateInfo()) }) val rrd_valids = Wire(Vec(issueWidth, Bool())) val rrd_uops = Wire(Vec(issueWidth, new MicroOp())) val exe_reg_valids = RegInit(VecInit(Seq.fill(issueWidth) { false.B })) val exe_reg_uops = Reg(Vec(issueWidth, new MicroOp())) val exe_reg_rs1_data = Reg(Vec(issueWidth, Bits(registerWidth.W))) val exe_reg_rs2_data = Reg(Vec(issueWidth, Bits(registerWidth.W))) val exe_reg_rs3_data = Reg(Vec(issueWidth, Bits(registerWidth.W))) val exe_reg_pred_data = Reg(Vec(issueWidth, Bool())) //------------------------------------------------------------- // hook up inputs for (w <- 0 until issueWidth) { val rrd_decode_unit = Module(new RegisterReadDecode(supportedUnitsArray(w))) rrd_decode_unit.io.iss_valid := io.iss_valids(w) rrd_decode_unit.io.iss_uop := io.iss_uops(w) rrd_valids(w) := RegNext(rrd_decode_unit.io.rrd_valid && !IsKilledByBranch(io.brupdate, rrd_decode_unit.io.rrd_uop)) rrd_uops(w) := 
RegNext(GetNewUopAndBrMask(rrd_decode_unit.io.rrd_uop, io.brupdate)) } //------------------------------------------------------------- // read ports require (numTotalReadPorts == numReadPortsArray.reduce(_+_)) val rrd_rs1_data = Wire(Vec(issueWidth, Bits(registerWidth.W))) val rrd_rs2_data = Wire(Vec(issueWidth, Bits(registerWidth.W))) val rrd_rs3_data = Wire(Vec(issueWidth, Bits(registerWidth.W))) val rrd_pred_data = Wire(Vec(issueWidth, Bool())) rrd_rs1_data := DontCare rrd_rs2_data := DontCare rrd_rs3_data := DontCare rrd_pred_data := DontCare io.prf_read_ports := DontCare var idx = 0 // index into flattened read_ports array for (w <- 0 until issueWidth) { val numReadPorts = numReadPortsArray(w) // NOTE: // rrdLatency==1, we need to send read address at end of ISS stage, // in order to get read data back at end of RRD stage. val rs1_addr = io.iss_uops(w).prs1 val rs2_addr = io.iss_uops(w).prs2 val rs3_addr = io.iss_uops(w).prs3 val pred_addr = io.iss_uops(w).ppred if (numReadPorts > 0) io.rf_read_ports(idx+0).addr := rs1_addr if (numReadPorts > 1) io.rf_read_ports(idx+1).addr := rs2_addr if (numReadPorts > 2) io.rf_read_ports(idx+2).addr := rs3_addr if (enableSFBOpt) io.prf_read_ports(w).addr := pred_addr if (numReadPorts > 0) rrd_rs1_data(w) := Mux(RegNext(rs1_addr === 0.U), 0.U, io.rf_read_ports(idx+0).data) if (numReadPorts > 1) rrd_rs2_data(w) := Mux(RegNext(rs2_addr === 0.U), 0.U, io.rf_read_ports(idx+1).data) if (numReadPorts > 2) rrd_rs3_data(w) := Mux(RegNext(rs3_addr === 0.U), 0.U, io.rf_read_ports(idx+2).data) if (enableSFBOpt) rrd_pred_data(w) := Mux(RegNext(io.iss_uops(w).is_sfb_shadow), io.prf_read_ports(w).data, false.B) val rrd_kill = io.kill || IsKilledByBranch(io.brupdate, rrd_uops(w)) exe_reg_valids(w) := Mux(rrd_kill, false.B, rrd_valids(w)) // TODO use only the valids signal, don't require us to set nullUop exe_reg_uops(w) := Mux(rrd_kill, NullMicroOp, rrd_uops(w)) exe_reg_uops(w).br_mask := GetNewBrMask(io.brupdate, rrd_uops(w)) idx += numReadPorts } //------------------------------------------------------------- //------------------------------------------------------------- // BYPASS MUXES ----------------------------------------------- // performed at the end of the register read stage // NOTES: this code is fairly hard-coded. Sorry. // ASSUMPTIONS: // - rs3 is used for FPU ops which are NOT bypassed (so don't check // them!). // - only bypass integer registers. 
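// The match logic below is effectively a small CAM: each bypass port's destination tag (pdst)
// is compared against the operand's physical register, qualified by rf_wen and an integer
// (RT_FIX) destination type on both sides, and p0 is excluded because physical register 0
// must always read as zero and is therefore never forwarded.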
val bypassed_rs1_data = Wire(Vec(issueWidth, Bits(registerWidth.W))) val bypassed_rs2_data = Wire(Vec(issueWidth, Bits(registerWidth.W))) val bypassed_pred_data = Wire(Vec(issueWidth, Bool())) bypassed_pred_data := DontCare for (w <- 0 until issueWidth) { val numReadPorts = numReadPortsArray(w) var rs1_cases = Array((false.B, 0.U(registerWidth.W))) var rs2_cases = Array((false.B, 0.U(registerWidth.W))) var pred_cases = Array((false.B, 0.U(1.W))) val prs1 = rrd_uops(w).prs1 val lrs1_rtype = rrd_uops(w).lrs1_rtype val prs2 = rrd_uops(w).prs2 val lrs2_rtype = rrd_uops(w).lrs2_rtype val ppred = rrd_uops(w).ppred for (b <- 0 until numTotalBypassPorts) { val bypass = io.bypass(b) // can't use "io.bypass.valid(b) since it would create a combinational loop on branch kills" rs1_cases ++= Array((bypass.valid && (prs1 === bypass.bits.uop.pdst) && bypass.bits.uop.rf_wen && bypass.bits.uop.dst_rtype === RT_FIX && lrs1_rtype === RT_FIX && (prs1 =/= 0.U), bypass.bits.data)) rs2_cases ++= Array((bypass.valid && (prs2 === bypass.bits.uop.pdst) && bypass.bits.uop.rf_wen && bypass.bits.uop.dst_rtype === RT_FIX && lrs2_rtype === RT_FIX && (prs2 =/= 0.U), bypass.bits.data)) } for (b <- 0 until numTotalPredBypassPorts) { val bypass = io.pred_bypass(b) pred_cases ++= Array((bypass.valid && (ppred === bypass.bits.uop.pdst) && bypass.bits.uop.is_sfb_br, bypass.bits.data)) } if (numReadPorts > 0) bypassed_rs1_data(w) := MuxCase(rrd_rs1_data(w), rs1_cases) if (numReadPorts > 1) bypassed_rs2_data(w) := MuxCase(rrd_rs2_data(w), rs2_cases) if (enableSFBOpt) bypassed_pred_data(w) := MuxCase(rrd_pred_data(w), pred_cases) } //------------------------------------------------------------- //------------------------------------------------------------- // **** Execute Stage **** //------------------------------------------------------------- //------------------------------------------------------------- for (w <- 0 until issueWidth) { val numReadPorts = numReadPortsArray(w) if (numReadPorts > 0) exe_reg_rs1_data(w) := bypassed_rs1_data(w) if (numReadPorts > 1) exe_reg_rs2_data(w) := bypassed_rs2_data(w) if (numReadPorts > 2) exe_reg_rs3_data(w) := rrd_rs3_data(w) if (enableSFBOpt) exe_reg_pred_data(w) := bypassed_pred_data(w) // ASSUMPTION: rs3 is FPU which is NOT bypassed } // TODO add assert to detect bypass conflicts on non-bypassable things // TODO add assert that checks bypassing to verify there isn't something it hits rs3 //------------------------------------------------------------- // set outputs to execute pipelines for (w <- 0 until issueWidth) { val numReadPorts = numReadPortsArray(w) io.exe_reqs(w).valid := exe_reg_valids(w) io.exe_reqs(w).bits := DontCare io.exe_reqs(w).bits.uop := exe_reg_uops(w) if (numReadPorts > 0) io.exe_reqs(w).bits.rs1_data := exe_reg_rs1_data(w) if (numReadPorts > 1) io.exe_reqs(w).bits.rs2_data := exe_reg_rs2_data(w) if (numReadPorts > 2) io.exe_reqs(w).bits.rs3_data := exe_reg_rs3_data(w) if (enableSFBOpt) io.exe_reqs(w).bits.pred_data := exe_reg_pred_data(w) } } File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. 
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
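    // Equivalent to pc & ~(b-1): clears the low log2(b) bits of pc (assuming b is a
    // power of two), but written with the double inversion so that the narrow
    // (b-1).U literal cannot truncate the upper PC bits.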
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
* * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } } File consts.scala: //****************************************************************************** // Copyright (c) 2011 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Constants //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.common.constants import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util.Str import freechips.rocketchip.rocket.RVCExpander /** * Mixin for issue queue types */ trait IQType { val IQT_SZ = 3 val IQT_INT = 1.U(IQT_SZ.W) val IQT_MEM = 2.U(IQT_SZ.W) val IQT_FP = 4.U(IQT_SZ.W) val IQT_MFP = 6.U(IQT_SZ.W) } /** * Mixin for scalar operation constants */ trait ScalarOpConstants { val X = BitPat("b?") val Y = BitPat("b1") val N = BitPat("b0") //************************************ // Extra Constants // Which branch predictor predicted us val BSRC_SZ = 2 val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred val BSRC_C = 3.U(BSRC_SZ.W) // core branch resolution //************************************ // Control Signals // CFI types val CFI_SZ = 3 val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction val CFI_BR = 1.U(CFI_SZ.W) // Branch val CFI_JAL = 2.U(CFI_SZ.W) // JAL val CFI_JALR = 3.U(CFI_SZ.W) // JALR // PC Select Signal val PC_PLUS4 = 0.U(2.W) // PC + 4 val PC_BRJMP = 1.U(2.W) // brjmp_target val PC_JALR = 2.U(2.W) // jump_reg_target // Branch Type val BR_N = 0.U(4.W) // Next val BR_NE = 1.U(4.W) // Branch on NotEqual val BR_EQ = 2.U(4.W) // Branch on Equal val BR_GE = 3.U(4.W) // Branch on Greater/Equal val BR_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned val BR_LT = 5.U(4.W) // Branch on Less Than val BR_LTU = 6.U(4.W) // Branch on Less Than Unsigned val BR_J = 7.U(4.W) // Jump val BR_JR = 8.U(4.W) // Jump Register // RS1 Operand Select Signal val OP1_RS1 = 0.U(2.W) // Register Source #1 val OP1_ZERO= 1.U(2.W) val OP1_PC = 2.U(2.W) val OP1_X = BitPat("b??") // RS2 Operand Select Signal val OP2_RS2 = 0.U(3.W) // Register Source #2 val OP2_IMM = 1.U(3.W) // immediate val OP2_ZERO= 2.U(3.W) // constant 0 val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4) val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1 val OP2_X = BitPat("b???") // Register File Write Enable Signal val REN_0 = false.B val REN_1 = true.B // Is 32b Word or 64b Doubldword? 
val SZ_DW = 1 val DW_X = true.B // Bool(xLen==64) val DW_32 = false.B val DW_64 = true.B val DW_XPR = true.B // Bool(xLen==64) // Memory Enable Signal val MEN_0 = false.B val MEN_1 = true.B val MEN_X = false.B // Immediate Extend Select val IS_I = 0.U(3.W) // I-Type (LD,ALU) val IS_S = 1.U(3.W) // S-Type (ST) val IS_B = 2.U(3.W) // SB-Type (BR) val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC) val IS_J = 4.U(3.W) // UJ-Type (J/JAL) val IS_X = BitPat("b???") // Decode Stage Control Signals val RT_FIX = 0.U(2.W) val RT_FLT = 1.U(2.W) val RT_PAS = 3.U(2.W) // pass-through (prs1 := lrs1, etc) val RT_X = 2.U(2.W) // not-a-register (but shouldn't get a busy-bit, etc.) // TODO rename RT_NAR // Micro-op opcodes // TODO change micro-op opcodes into using enum val UOPC_SZ = 7 val uopX = BitPat.dontCare(UOPC_SZ) val uopNOP = 0.U(UOPC_SZ.W) val uopLD = 1.U(UOPC_SZ.W) val uopSTA = 2.U(UOPC_SZ.W) // store address generation val uopSTD = 3.U(UOPC_SZ.W) // store data generation val uopLUI = 4.U(UOPC_SZ.W) val uopADDI = 5.U(UOPC_SZ.W) val uopANDI = 6.U(UOPC_SZ.W) val uopORI = 7.U(UOPC_SZ.W) val uopXORI = 8.U(UOPC_SZ.W) val uopSLTI = 9.U(UOPC_SZ.W) val uopSLTIU= 10.U(UOPC_SZ.W) val uopSLLI = 11.U(UOPC_SZ.W) val uopSRAI = 12.U(UOPC_SZ.W) val uopSRLI = 13.U(UOPC_SZ.W) val uopSLL = 14.U(UOPC_SZ.W) val uopADD = 15.U(UOPC_SZ.W) val uopSUB = 16.U(UOPC_SZ.W) val uopSLT = 17.U(UOPC_SZ.W) val uopSLTU = 18.U(UOPC_SZ.W) val uopAND = 19.U(UOPC_SZ.W) val uopOR = 20.U(UOPC_SZ.W) val uopXOR = 21.U(UOPC_SZ.W) val uopSRA = 22.U(UOPC_SZ.W) val uopSRL = 23.U(UOPC_SZ.W) val uopBEQ = 24.U(UOPC_SZ.W) val uopBNE = 25.U(UOPC_SZ.W) val uopBGE = 26.U(UOPC_SZ.W) val uopBGEU = 27.U(UOPC_SZ.W) val uopBLT = 28.U(UOPC_SZ.W) val uopBLTU = 29.U(UOPC_SZ.W) val uopCSRRW= 30.U(UOPC_SZ.W) val uopCSRRS= 31.U(UOPC_SZ.W) val uopCSRRC= 32.U(UOPC_SZ.W) val uopCSRRWI=33.U(UOPC_SZ.W) val uopCSRRSI=34.U(UOPC_SZ.W) val uopCSRRCI=35.U(UOPC_SZ.W) val uopJ = 36.U(UOPC_SZ.W) val uopJAL = 37.U(UOPC_SZ.W) val uopJALR = 38.U(UOPC_SZ.W) val uopAUIPC= 39.U(UOPC_SZ.W) //val uopSRET = 40.U(UOPC_SZ.W) val uopCFLSH= 41.U(UOPC_SZ.W) val uopFENCE= 42.U(UOPC_SZ.W) val uopADDIW= 43.U(UOPC_SZ.W) val uopADDW = 44.U(UOPC_SZ.W) val uopSUBW = 45.U(UOPC_SZ.W) val uopSLLIW= 46.U(UOPC_SZ.W) val uopSLLW = 47.U(UOPC_SZ.W) val uopSRAIW= 48.U(UOPC_SZ.W) val uopSRAW = 49.U(UOPC_SZ.W) val uopSRLIW= 50.U(UOPC_SZ.W) val uopSRLW = 51.U(UOPC_SZ.W) val uopMUL = 52.U(UOPC_SZ.W) val uopMULH = 53.U(UOPC_SZ.W) val uopMULHU= 54.U(UOPC_SZ.W) val uopMULHSU=55.U(UOPC_SZ.W) val uopMULW = 56.U(UOPC_SZ.W) val uopDIV = 57.U(UOPC_SZ.W) val uopDIVU = 58.U(UOPC_SZ.W) val uopREM = 59.U(UOPC_SZ.W) val uopREMU = 60.U(UOPC_SZ.W) val uopDIVW = 61.U(UOPC_SZ.W) val uopDIVUW= 62.U(UOPC_SZ.W) val uopREMW = 63.U(UOPC_SZ.W) val uopREMUW= 64.U(UOPC_SZ.W) val uopFENCEI = 65.U(UOPC_SZ.W) // = 66.U(UOPC_SZ.W) val uopAMO_AG = 67.U(UOPC_SZ.W) // AMO-address gen (use normal STD for datagen) val uopFMV_W_X = 68.U(UOPC_SZ.W) val uopFMV_D_X = 69.U(UOPC_SZ.W) val uopFMV_X_W = 70.U(UOPC_SZ.W) val uopFMV_X_D = 71.U(UOPC_SZ.W) val uopFSGNJ_S = 72.U(UOPC_SZ.W) val uopFSGNJ_D = 73.U(UOPC_SZ.W) val uopFCVT_S_D = 74.U(UOPC_SZ.W) val uopFCVT_D_S = 75.U(UOPC_SZ.W) val uopFCVT_S_X = 76.U(UOPC_SZ.W) val uopFCVT_D_X = 77.U(UOPC_SZ.W) val uopFCVT_X_S = 78.U(UOPC_SZ.W) val uopFCVT_X_D = 79.U(UOPC_SZ.W) val uopCMPR_S = 80.U(UOPC_SZ.W) val uopCMPR_D = 81.U(UOPC_SZ.W) val uopFCLASS_S = 82.U(UOPC_SZ.W) val uopFCLASS_D = 83.U(UOPC_SZ.W) val uopFMINMAX_S = 84.U(UOPC_SZ.W) val uopFMINMAX_D = 85.U(UOPC_SZ.W) // = 86.U(UOPC_SZ.W) val uopFADD_S = 
87.U(UOPC_SZ.W) val uopFSUB_S = 88.U(UOPC_SZ.W) val uopFMUL_S = 89.U(UOPC_SZ.W) val uopFADD_D = 90.U(UOPC_SZ.W) val uopFSUB_D = 91.U(UOPC_SZ.W) val uopFMUL_D = 92.U(UOPC_SZ.W) val uopFMADD_S = 93.U(UOPC_SZ.W) val uopFMSUB_S = 94.U(UOPC_SZ.W) val uopFNMADD_S = 95.U(UOPC_SZ.W) val uopFNMSUB_S = 96.U(UOPC_SZ.W) val uopFMADD_D = 97.U(UOPC_SZ.W) val uopFMSUB_D = 98.U(UOPC_SZ.W) val uopFNMADD_D = 99.U(UOPC_SZ.W) val uopFNMSUB_D = 100.U(UOPC_SZ.W) val uopFDIV_S = 101.U(UOPC_SZ.W) val uopFDIV_D = 102.U(UOPC_SZ.W) val uopFSQRT_S = 103.U(UOPC_SZ.W) val uopFSQRT_D = 104.U(UOPC_SZ.W) val uopWFI = 105.U(UOPC_SZ.W) // pass uop down the CSR pipeline val uopERET = 106.U(UOPC_SZ.W) // pass uop down the CSR pipeline, also is ERET val uopSFENCE = 107.U(UOPC_SZ.W) val uopROCC = 108.U(UOPC_SZ.W) val uopMOV = 109.U(UOPC_SZ.W) // conditional mov decoded from "add rd, x0, rs2" // The Bubble Instruction (Machine generated NOP) // Insert (XOR x0,x0,x0) which is different from software compiler // generated NOPs which are (ADDI x0, x0, 0). // Reasoning for this is to let visualizers and stat-trackers differentiate // between software NOPs and machine-generated Bubbles in the pipeline. val BUBBLE = (0x4033).U(32.W) def NullMicroOp()(implicit p: Parameters): boom.v3.common.MicroOp = { val uop = Wire(new boom.v3.common.MicroOp) uop := DontCare // Overridden in the following lines uop.uopc := uopNOP // maybe not required, but helps on asserts that try to catch spurious behavior uop.bypassable := false.B uop.fp_val := false.B uop.uses_stq := false.B uop.uses_ldq := false.B uop.pdst := 0.U uop.dst_rtype := RT_X val cs = Wire(new boom.v3.common.CtrlSignals()) cs := DontCare // Overridden in the following lines cs.br_type := BR_N cs.csr_cmd := freechips.rocketchip.rocket.CSR.N cs.is_load := false.B cs.is_sta := false.B cs.is_std := false.B uop.ctrl := cs uop } } /** * Mixin for RISCV constants */ trait RISCVConstants { // abstract out instruction decode magic numbers val RD_MSB = 11 val RD_LSB = 7 val RS1_MSB = 19 val RS1_LSB = 15 val RS2_MSB = 24 val RS2_LSB = 20 val RS3_MSB = 31 val RS3_LSB = 27 val CSR_ADDR_MSB = 31 val CSR_ADDR_LSB = 20 val CSR_ADDR_SZ = 12 // location of the fifth bit in the shamt (for checking for illegal ops for SRAIW,etc.) val SHAMT_5_BIT = 25 val LONGEST_IMM_SZ = 20 val X0 = 0.U val RA = 1.U // return address register // memory consistency model // The C/C++ atomics MCM requires that two loads to the same address maintain program order. // The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior). 
val MCM_ORDER_DEPENDENT_LOADS = true val jal_opc = (0x6f).U val jalr_opc = (0x67).U def GetUop(inst: UInt): UInt = inst(6,0) def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB) def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB) def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = { val rvc_exp = Module(new RVCExpander) rvc_exp.io.in := inst Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst) } // Note: Accepts only EXPANDED rvc instructions def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) ((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) ((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = { val bdecode = Module(new boom.v3.exu.BranchDecode) bdecode.io.inst := inst bdecode.io.pc := 0.U bdecode.io.out.cfi_type } } /** * Mixin for exception cause constants */ trait ExcCauseConstants { // a memory disambigious misspeculation occurred val MINI_EXCEPTION_MEM_ORDERING = 16.U val MINI_EXCEPTION_CSR_REPLAY = 17.U require (!freechips.rocketchip.rocket.Causes.all.contains(16)) require (!freechips.rocketchip.rocket.Causes.all.contains(17)) }
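ComputeBranchTarget above reassembles the scrambled B-type immediate fields (inst[31], inst[7], inst[30:25], inst[11:8]) into a sign-extended offset, adds it to the PC, and clears bit 0. As a quick illustration, here is a self-contained plain-Scala sketch of the same bit reassembly; the object and helper names are invented for this example and are not part of the BOOM sources or of the generated design. The encoding used is beq x0, x0, -8.

object BranchImmExample {
  // Mirror of Cat(Fill(20, inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
  // from ComputeBranchTarget, done on a plain Long instead of a Chisel UInt.
  def bImm(inst: Long): Long = {
    def bits(hi: Int, lo: Int): Long = (inst >>> lo) & ((1L << (hi - lo + 1)) - 1)
    val raw = (bits(31, 31) << 12) | (bits(7, 7) << 11) |
              (bits(30, 25) << 5) | (bits(11, 8) << 1)            // imm[0] is always 0
    if ((raw & (1L << 12)) != 0) raw | ~((1L << 13) - 1) else raw // sign-extend from bit 12
  }

  def main(args: Array[String]): Unit = {
    val beqMinus8 = 0xFE000CE3L                  // beq x0, x0, -8
    val pc        = 0x80000010L
    val target    = (pc + bImm(beqMinus8)) & ~1L // mirrors the final "& (-2).S"
    println(f"offset=${bImm(beqMinus8)}%d target=0x$target%x") // offset=-8 target=0x80000008
  }
}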
module RegisterRead_2( // @[register-read.scala:34:7] input clock, // @[register-read.scala:34:7] input reset, // @[register-read.scala:34:7] input io_iss_valids_0, // @[register-read.scala:47:14] input [6:0] io_iss_uops_0_uopc, // @[register-read.scala:47:14] input [31:0] io_iss_uops_0_inst, // @[register-read.scala:47:14] input [31:0] io_iss_uops_0_debug_inst, // @[register-read.scala:47:14] input io_iss_uops_0_is_rvc, // @[register-read.scala:47:14] input [39:0] io_iss_uops_0_debug_pc, // @[register-read.scala:47:14] input [2:0] io_iss_uops_0_iq_type, // @[register-read.scala:47:14] input [9:0] io_iss_uops_0_fu_code, // @[register-read.scala:47:14] input [3:0] io_iss_uops_0_ctrl_br_type, // @[register-read.scala:47:14] input [1:0] io_iss_uops_0_ctrl_op1_sel, // @[register-read.scala:47:14] input [2:0] io_iss_uops_0_ctrl_op2_sel, // @[register-read.scala:47:14] input [2:0] io_iss_uops_0_ctrl_imm_sel, // @[register-read.scala:47:14] input [4:0] io_iss_uops_0_ctrl_op_fcn, // @[register-read.scala:47:14] input io_iss_uops_0_ctrl_fcn_dw, // @[register-read.scala:47:14] input [2:0] io_iss_uops_0_ctrl_csr_cmd, // @[register-read.scala:47:14] input io_iss_uops_0_ctrl_is_load, // @[register-read.scala:47:14] input io_iss_uops_0_ctrl_is_sta, // @[register-read.scala:47:14] input io_iss_uops_0_ctrl_is_std, // @[register-read.scala:47:14] input [1:0] io_iss_uops_0_iw_state, // @[register-read.scala:47:14] input io_iss_uops_0_is_br, // @[register-read.scala:47:14] input io_iss_uops_0_is_jalr, // @[register-read.scala:47:14] input io_iss_uops_0_is_jal, // @[register-read.scala:47:14] input io_iss_uops_0_is_sfb, // @[register-read.scala:47:14] input [7:0] io_iss_uops_0_br_mask, // @[register-read.scala:47:14] input [2:0] io_iss_uops_0_br_tag, // @[register-read.scala:47:14] input [3:0] io_iss_uops_0_ftq_idx, // @[register-read.scala:47:14] input io_iss_uops_0_edge_inst, // @[register-read.scala:47:14] input [5:0] io_iss_uops_0_pc_lob, // @[register-read.scala:47:14] input io_iss_uops_0_taken, // @[register-read.scala:47:14] input [19:0] io_iss_uops_0_imm_packed, // @[register-read.scala:47:14] input [11:0] io_iss_uops_0_csr_addr, // @[register-read.scala:47:14] input [4:0] io_iss_uops_0_rob_idx, // @[register-read.scala:47:14] input [2:0] io_iss_uops_0_ldq_idx, // @[register-read.scala:47:14] input [2:0] io_iss_uops_0_stq_idx, // @[register-read.scala:47:14] input [1:0] io_iss_uops_0_rxq_idx, // @[register-read.scala:47:14] input [5:0] io_iss_uops_0_pdst, // @[register-read.scala:47:14] input [5:0] io_iss_uops_0_prs1, // @[register-read.scala:47:14] input [5:0] io_iss_uops_0_prs2, // @[register-read.scala:47:14] input [5:0] io_iss_uops_0_prs3, // @[register-read.scala:47:14] input [3:0] io_iss_uops_0_ppred, // @[register-read.scala:47:14] input io_iss_uops_0_prs1_busy, // @[register-read.scala:47:14] input io_iss_uops_0_prs2_busy, // @[register-read.scala:47:14] input io_iss_uops_0_prs3_busy, // @[register-read.scala:47:14] input io_iss_uops_0_ppred_busy, // @[register-read.scala:47:14] input [5:0] io_iss_uops_0_stale_pdst, // @[register-read.scala:47:14] input io_iss_uops_0_exception, // @[register-read.scala:47:14] input [63:0] io_iss_uops_0_exc_cause, // @[register-read.scala:47:14] input io_iss_uops_0_bypassable, // @[register-read.scala:47:14] input [4:0] io_iss_uops_0_mem_cmd, // @[register-read.scala:47:14] input [1:0] io_iss_uops_0_mem_size, // @[register-read.scala:47:14] input io_iss_uops_0_mem_signed, // @[register-read.scala:47:14] input io_iss_uops_0_is_fence, // 
@[register-read.scala:47:14] input io_iss_uops_0_is_fencei, // @[register-read.scala:47:14] input io_iss_uops_0_is_amo, // @[register-read.scala:47:14] input io_iss_uops_0_uses_ldq, // @[register-read.scala:47:14] input io_iss_uops_0_uses_stq, // @[register-read.scala:47:14] input io_iss_uops_0_is_sys_pc2epc, // @[register-read.scala:47:14] input io_iss_uops_0_is_unique, // @[register-read.scala:47:14] input io_iss_uops_0_flush_on_commit, // @[register-read.scala:47:14] input io_iss_uops_0_ldst_is_rs1, // @[register-read.scala:47:14] input [5:0] io_iss_uops_0_ldst, // @[register-read.scala:47:14] input [5:0] io_iss_uops_0_lrs1, // @[register-read.scala:47:14] input [5:0] io_iss_uops_0_lrs2, // @[register-read.scala:47:14] input [5:0] io_iss_uops_0_lrs3, // @[register-read.scala:47:14] input io_iss_uops_0_ldst_val, // @[register-read.scala:47:14] input [1:0] io_iss_uops_0_dst_rtype, // @[register-read.scala:47:14] input [1:0] io_iss_uops_0_lrs1_rtype, // @[register-read.scala:47:14] input [1:0] io_iss_uops_0_lrs2_rtype, // @[register-read.scala:47:14] input io_iss_uops_0_frs3_en, // @[register-read.scala:47:14] input io_iss_uops_0_fp_val, // @[register-read.scala:47:14] input io_iss_uops_0_fp_single, // @[register-read.scala:47:14] input io_iss_uops_0_xcpt_pf_if, // @[register-read.scala:47:14] input io_iss_uops_0_xcpt_ae_if, // @[register-read.scala:47:14] input io_iss_uops_0_xcpt_ma_if, // @[register-read.scala:47:14] input io_iss_uops_0_bp_debug_if, // @[register-read.scala:47:14] input io_iss_uops_0_bp_xcpt_if, // @[register-read.scala:47:14] input [1:0] io_iss_uops_0_debug_fsrc, // @[register-read.scala:47:14] input [1:0] io_iss_uops_0_debug_tsrc, // @[register-read.scala:47:14] output [5:0] io_rf_read_ports_0_addr, // @[register-read.scala:47:14] input [64:0] io_rf_read_ports_0_data, // @[register-read.scala:47:14] output [5:0] io_rf_read_ports_1_addr, // @[register-read.scala:47:14] input [64:0] io_rf_read_ports_1_data, // @[register-read.scala:47:14] output [5:0] io_rf_read_ports_2_addr, // @[register-read.scala:47:14] input [64:0] io_rf_read_ports_2_data, // @[register-read.scala:47:14] output io_exe_reqs_0_valid, // @[register-read.scala:47:14] output [6:0] io_exe_reqs_0_bits_uop_uopc, // @[register-read.scala:47:14] output [31:0] io_exe_reqs_0_bits_uop_inst, // @[register-read.scala:47:14] output [31:0] io_exe_reqs_0_bits_uop_debug_inst, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_is_rvc, // @[register-read.scala:47:14] output [39:0] io_exe_reqs_0_bits_uop_debug_pc, // @[register-read.scala:47:14] output [2:0] io_exe_reqs_0_bits_uop_iq_type, // @[register-read.scala:47:14] output [9:0] io_exe_reqs_0_bits_uop_fu_code, // @[register-read.scala:47:14] output [3:0] io_exe_reqs_0_bits_uop_ctrl_br_type, // @[register-read.scala:47:14] output [1:0] io_exe_reqs_0_bits_uop_ctrl_op1_sel, // @[register-read.scala:47:14] output [2:0] io_exe_reqs_0_bits_uop_ctrl_op2_sel, // @[register-read.scala:47:14] output [2:0] io_exe_reqs_0_bits_uop_ctrl_imm_sel, // @[register-read.scala:47:14] output [4:0] io_exe_reqs_0_bits_uop_ctrl_op_fcn, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_ctrl_fcn_dw, // @[register-read.scala:47:14] output [2:0] io_exe_reqs_0_bits_uop_ctrl_csr_cmd, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_ctrl_is_load, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_ctrl_is_sta, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_ctrl_is_std, // @[register-read.scala:47:14] output [1:0] 
io_exe_reqs_0_bits_uop_iw_state, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_iw_p1_poisoned, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_iw_p2_poisoned, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_is_br, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_is_jalr, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_is_jal, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_is_sfb, // @[register-read.scala:47:14] output [7:0] io_exe_reqs_0_bits_uop_br_mask, // @[register-read.scala:47:14] output [2:0] io_exe_reqs_0_bits_uop_br_tag, // @[register-read.scala:47:14] output [3:0] io_exe_reqs_0_bits_uop_ftq_idx, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_edge_inst, // @[register-read.scala:47:14] output [5:0] io_exe_reqs_0_bits_uop_pc_lob, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_taken, // @[register-read.scala:47:14] output [19:0] io_exe_reqs_0_bits_uop_imm_packed, // @[register-read.scala:47:14] output [11:0] io_exe_reqs_0_bits_uop_csr_addr, // @[register-read.scala:47:14] output [4:0] io_exe_reqs_0_bits_uop_rob_idx, // @[register-read.scala:47:14] output [2:0] io_exe_reqs_0_bits_uop_ldq_idx, // @[register-read.scala:47:14] output [2:0] io_exe_reqs_0_bits_uop_stq_idx, // @[register-read.scala:47:14] output [1:0] io_exe_reqs_0_bits_uop_rxq_idx, // @[register-read.scala:47:14] output [5:0] io_exe_reqs_0_bits_uop_pdst, // @[register-read.scala:47:14] output [5:0] io_exe_reqs_0_bits_uop_prs1, // @[register-read.scala:47:14] output [5:0] io_exe_reqs_0_bits_uop_prs2, // @[register-read.scala:47:14] output [5:0] io_exe_reqs_0_bits_uop_prs3, // @[register-read.scala:47:14] output [3:0] io_exe_reqs_0_bits_uop_ppred, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_prs1_busy, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_prs2_busy, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_prs3_busy, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_ppred_busy, // @[register-read.scala:47:14] output [5:0] io_exe_reqs_0_bits_uop_stale_pdst, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_exception, // @[register-read.scala:47:14] output [63:0] io_exe_reqs_0_bits_uop_exc_cause, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_bypassable, // @[register-read.scala:47:14] output [4:0] io_exe_reqs_0_bits_uop_mem_cmd, // @[register-read.scala:47:14] output [1:0] io_exe_reqs_0_bits_uop_mem_size, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_mem_signed, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_is_fence, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_is_fencei, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_is_amo, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_uses_ldq, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_uses_stq, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_is_sys_pc2epc, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_is_unique, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_flush_on_commit, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_ldst_is_rs1, // @[register-read.scala:47:14] output [5:0] io_exe_reqs_0_bits_uop_ldst, // @[register-read.scala:47:14] output [5:0] io_exe_reqs_0_bits_uop_lrs1, // @[register-read.scala:47:14] output [5:0] io_exe_reqs_0_bits_uop_lrs2, // @[register-read.scala:47:14] output [5:0] io_exe_reqs_0_bits_uop_lrs3, // 
@[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_ldst_val, // @[register-read.scala:47:14] output [1:0] io_exe_reqs_0_bits_uop_dst_rtype, // @[register-read.scala:47:14] output [1:0] io_exe_reqs_0_bits_uop_lrs1_rtype, // @[register-read.scala:47:14] output [1:0] io_exe_reqs_0_bits_uop_lrs2_rtype, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_frs3_en, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_fp_val, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_fp_single, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_xcpt_pf_if, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_xcpt_ae_if, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_xcpt_ma_if, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_bp_debug_if, // @[register-read.scala:47:14] output io_exe_reqs_0_bits_uop_bp_xcpt_if, // @[register-read.scala:47:14] output [1:0] io_exe_reqs_0_bits_uop_debug_fsrc, // @[register-read.scala:47:14] output [1:0] io_exe_reqs_0_bits_uop_debug_tsrc, // @[register-read.scala:47:14] output [64:0] io_exe_reqs_0_bits_rs1_data, // @[register-read.scala:47:14] output [64:0] io_exe_reqs_0_bits_rs2_data, // @[register-read.scala:47:14] output [64:0] io_exe_reqs_0_bits_rs3_data, // @[register-read.scala:47:14] input io_kill, // @[register-read.scala:47:14] input [7:0] io_brupdate_b1_resolve_mask, // @[register-read.scala:47:14] input [7:0] io_brupdate_b1_mispredict_mask, // @[register-read.scala:47:14] input [6:0] io_brupdate_b2_uop_uopc, // @[register-read.scala:47:14] input [31:0] io_brupdate_b2_uop_inst, // @[register-read.scala:47:14] input [31:0] io_brupdate_b2_uop_debug_inst, // @[register-read.scala:47:14] input io_brupdate_b2_uop_is_rvc, // @[register-read.scala:47:14] input [39:0] io_brupdate_b2_uop_debug_pc, // @[register-read.scala:47:14] input [2:0] io_brupdate_b2_uop_iq_type, // @[register-read.scala:47:14] input [9:0] io_brupdate_b2_uop_fu_code, // @[register-read.scala:47:14] input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[register-read.scala:47:14] input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[register-read.scala:47:14] input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[register-read.scala:47:14] input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[register-read.scala:47:14] input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[register-read.scala:47:14] input io_brupdate_b2_uop_ctrl_fcn_dw, // @[register-read.scala:47:14] input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[register-read.scala:47:14] input io_brupdate_b2_uop_ctrl_is_load, // @[register-read.scala:47:14] input io_brupdate_b2_uop_ctrl_is_sta, // @[register-read.scala:47:14] input io_brupdate_b2_uop_ctrl_is_std, // @[register-read.scala:47:14] input [1:0] io_brupdate_b2_uop_iw_state, // @[register-read.scala:47:14] input io_brupdate_b2_uop_iw_p1_poisoned, // @[register-read.scala:47:14] input io_brupdate_b2_uop_iw_p2_poisoned, // @[register-read.scala:47:14] input io_brupdate_b2_uop_is_br, // @[register-read.scala:47:14] input io_brupdate_b2_uop_is_jalr, // @[register-read.scala:47:14] input io_brupdate_b2_uop_is_jal, // @[register-read.scala:47:14] input io_brupdate_b2_uop_is_sfb, // @[register-read.scala:47:14] input [7:0] io_brupdate_b2_uop_br_mask, // @[register-read.scala:47:14] input [2:0] io_brupdate_b2_uop_br_tag, // @[register-read.scala:47:14] input [3:0] io_brupdate_b2_uop_ftq_idx, // @[register-read.scala:47:14] input io_brupdate_b2_uop_edge_inst, // @[register-read.scala:47:14] input [5:0] io_brupdate_b2_uop_pc_lob, // 
@[register-read.scala:47:14] input io_brupdate_b2_uop_taken, // @[register-read.scala:47:14] input [19:0] io_brupdate_b2_uop_imm_packed, // @[register-read.scala:47:14] input [11:0] io_brupdate_b2_uop_csr_addr, // @[register-read.scala:47:14] input [4:0] io_brupdate_b2_uop_rob_idx, // @[register-read.scala:47:14] input [2:0] io_brupdate_b2_uop_ldq_idx, // @[register-read.scala:47:14] input [2:0] io_brupdate_b2_uop_stq_idx, // @[register-read.scala:47:14] input [1:0] io_brupdate_b2_uop_rxq_idx, // @[register-read.scala:47:14] input [5:0] io_brupdate_b2_uop_pdst, // @[register-read.scala:47:14] input [5:0] io_brupdate_b2_uop_prs1, // @[register-read.scala:47:14] input [5:0] io_brupdate_b2_uop_prs2, // @[register-read.scala:47:14] input [5:0] io_brupdate_b2_uop_prs3, // @[register-read.scala:47:14] input [3:0] io_brupdate_b2_uop_ppred, // @[register-read.scala:47:14] input io_brupdate_b2_uop_prs1_busy, // @[register-read.scala:47:14] input io_brupdate_b2_uop_prs2_busy, // @[register-read.scala:47:14] input io_brupdate_b2_uop_prs3_busy, // @[register-read.scala:47:14] input io_brupdate_b2_uop_ppred_busy, // @[register-read.scala:47:14] input [5:0] io_brupdate_b2_uop_stale_pdst, // @[register-read.scala:47:14] input io_brupdate_b2_uop_exception, // @[register-read.scala:47:14] input [63:0] io_brupdate_b2_uop_exc_cause, // @[register-read.scala:47:14] input io_brupdate_b2_uop_bypassable, // @[register-read.scala:47:14] input [4:0] io_brupdate_b2_uop_mem_cmd, // @[register-read.scala:47:14] input [1:0] io_brupdate_b2_uop_mem_size, // @[register-read.scala:47:14] input io_brupdate_b2_uop_mem_signed, // @[register-read.scala:47:14] input io_brupdate_b2_uop_is_fence, // @[register-read.scala:47:14] input io_brupdate_b2_uop_is_fencei, // @[register-read.scala:47:14] input io_brupdate_b2_uop_is_amo, // @[register-read.scala:47:14] input io_brupdate_b2_uop_uses_ldq, // @[register-read.scala:47:14] input io_brupdate_b2_uop_uses_stq, // @[register-read.scala:47:14] input io_brupdate_b2_uop_is_sys_pc2epc, // @[register-read.scala:47:14] input io_brupdate_b2_uop_is_unique, // @[register-read.scala:47:14] input io_brupdate_b2_uop_flush_on_commit, // @[register-read.scala:47:14] input io_brupdate_b2_uop_ldst_is_rs1, // @[register-read.scala:47:14] input [5:0] io_brupdate_b2_uop_ldst, // @[register-read.scala:47:14] input [5:0] io_brupdate_b2_uop_lrs1, // @[register-read.scala:47:14] input [5:0] io_brupdate_b2_uop_lrs2, // @[register-read.scala:47:14] input [5:0] io_brupdate_b2_uop_lrs3, // @[register-read.scala:47:14] input io_brupdate_b2_uop_ldst_val, // @[register-read.scala:47:14] input [1:0] io_brupdate_b2_uop_dst_rtype, // @[register-read.scala:47:14] input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[register-read.scala:47:14] input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[register-read.scala:47:14] input io_brupdate_b2_uop_frs3_en, // @[register-read.scala:47:14] input io_brupdate_b2_uop_fp_val, // @[register-read.scala:47:14] input io_brupdate_b2_uop_fp_single, // @[register-read.scala:47:14] input io_brupdate_b2_uop_xcpt_pf_if, // @[register-read.scala:47:14] input io_brupdate_b2_uop_xcpt_ae_if, // @[register-read.scala:47:14] input io_brupdate_b2_uop_xcpt_ma_if, // @[register-read.scala:47:14] input io_brupdate_b2_uop_bp_debug_if, // @[register-read.scala:47:14] input io_brupdate_b2_uop_bp_xcpt_if, // @[register-read.scala:47:14] input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[register-read.scala:47:14] input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[register-read.scala:47:14] input 
io_brupdate_b2_valid, // @[register-read.scala:47:14] input io_brupdate_b2_mispredict, // @[register-read.scala:47:14] input io_brupdate_b2_taken, // @[register-read.scala:47:14] input [2:0] io_brupdate_b2_cfi_type, // @[register-read.scala:47:14] input [1:0] io_brupdate_b2_pc_sel, // @[register-read.scala:47:14] input [39:0] io_brupdate_b2_jalr_target, // @[register-read.scala:47:14] input [20:0] io_brupdate_b2_target_offset // @[register-read.scala:47:14] ); wire _rrd_decode_unit_io_rrd_valid; // @[register-read.scala:80:33] wire [7:0] _rrd_decode_unit_io_rrd_uop_br_mask; // @[register-read.scala:80:33] wire io_iss_valids_0_0 = io_iss_valids_0; // @[register-read.scala:34:7] wire [6:0] io_iss_uops_0_uopc_0 = io_iss_uops_0_uopc; // @[register-read.scala:34:7] wire [31:0] io_iss_uops_0_inst_0 = io_iss_uops_0_inst; // @[register-read.scala:34:7] wire [31:0] io_iss_uops_0_debug_inst_0 = io_iss_uops_0_debug_inst; // @[register-read.scala:34:7] wire io_iss_uops_0_is_rvc_0 = io_iss_uops_0_is_rvc; // @[register-read.scala:34:7] wire [39:0] io_iss_uops_0_debug_pc_0 = io_iss_uops_0_debug_pc; // @[register-read.scala:34:7] wire [2:0] io_iss_uops_0_iq_type_0 = io_iss_uops_0_iq_type; // @[register-read.scala:34:7] wire [9:0] io_iss_uops_0_fu_code_0 = io_iss_uops_0_fu_code; // @[register-read.scala:34:7] wire [3:0] io_iss_uops_0_ctrl_br_type_0 = io_iss_uops_0_ctrl_br_type; // @[register-read.scala:34:7] wire [1:0] io_iss_uops_0_ctrl_op1_sel_0 = io_iss_uops_0_ctrl_op1_sel; // @[register-read.scala:34:7] wire [2:0] io_iss_uops_0_ctrl_op2_sel_0 = io_iss_uops_0_ctrl_op2_sel; // @[register-read.scala:34:7] wire [2:0] io_iss_uops_0_ctrl_imm_sel_0 = io_iss_uops_0_ctrl_imm_sel; // @[register-read.scala:34:7] wire [4:0] io_iss_uops_0_ctrl_op_fcn_0 = io_iss_uops_0_ctrl_op_fcn; // @[register-read.scala:34:7] wire io_iss_uops_0_ctrl_fcn_dw_0 = io_iss_uops_0_ctrl_fcn_dw; // @[register-read.scala:34:7] wire [2:0] io_iss_uops_0_ctrl_csr_cmd_0 = io_iss_uops_0_ctrl_csr_cmd; // @[register-read.scala:34:7] wire io_iss_uops_0_ctrl_is_load_0 = io_iss_uops_0_ctrl_is_load; // @[register-read.scala:34:7] wire io_iss_uops_0_ctrl_is_sta_0 = io_iss_uops_0_ctrl_is_sta; // @[register-read.scala:34:7] wire io_iss_uops_0_ctrl_is_std_0 = io_iss_uops_0_ctrl_is_std; // @[register-read.scala:34:7] wire [1:0] io_iss_uops_0_iw_state_0 = io_iss_uops_0_iw_state; // @[register-read.scala:34:7] wire io_iss_uops_0_is_br_0 = io_iss_uops_0_is_br; // @[register-read.scala:34:7] wire io_iss_uops_0_is_jalr_0 = io_iss_uops_0_is_jalr; // @[register-read.scala:34:7] wire io_iss_uops_0_is_jal_0 = io_iss_uops_0_is_jal; // @[register-read.scala:34:7] wire io_iss_uops_0_is_sfb_0 = io_iss_uops_0_is_sfb; // @[register-read.scala:34:7] wire [7:0] io_iss_uops_0_br_mask_0 = io_iss_uops_0_br_mask; // @[register-read.scala:34:7] wire [2:0] io_iss_uops_0_br_tag_0 = io_iss_uops_0_br_tag; // @[register-read.scala:34:7] wire [3:0] io_iss_uops_0_ftq_idx_0 = io_iss_uops_0_ftq_idx; // @[register-read.scala:34:7] wire io_iss_uops_0_edge_inst_0 = io_iss_uops_0_edge_inst; // @[register-read.scala:34:7] wire [5:0] io_iss_uops_0_pc_lob_0 = io_iss_uops_0_pc_lob; // @[register-read.scala:34:7] wire io_iss_uops_0_taken_0 = io_iss_uops_0_taken; // @[register-read.scala:34:7] wire [19:0] io_iss_uops_0_imm_packed_0 = io_iss_uops_0_imm_packed; // @[register-read.scala:34:7] wire [11:0] io_iss_uops_0_csr_addr_0 = io_iss_uops_0_csr_addr; // @[register-read.scala:34:7] wire [4:0] io_iss_uops_0_rob_idx_0 = io_iss_uops_0_rob_idx; // @[register-read.scala:34:7] wire [2:0] 
io_iss_uops_0_ldq_idx_0 = io_iss_uops_0_ldq_idx; // @[register-read.scala:34:7] wire [2:0] io_iss_uops_0_stq_idx_0 = io_iss_uops_0_stq_idx; // @[register-read.scala:34:7] wire [1:0] io_iss_uops_0_rxq_idx_0 = io_iss_uops_0_rxq_idx; // @[register-read.scala:34:7] wire [5:0] io_iss_uops_0_pdst_0 = io_iss_uops_0_pdst; // @[register-read.scala:34:7] wire [5:0] io_iss_uops_0_prs1_0 = io_iss_uops_0_prs1; // @[register-read.scala:34:7] wire [5:0] io_iss_uops_0_prs2_0 = io_iss_uops_0_prs2; // @[register-read.scala:34:7] wire [5:0] io_iss_uops_0_prs3_0 = io_iss_uops_0_prs3; // @[register-read.scala:34:7] wire [3:0] io_iss_uops_0_ppred_0 = io_iss_uops_0_ppred; // @[register-read.scala:34:7] wire io_iss_uops_0_prs1_busy_0 = io_iss_uops_0_prs1_busy; // @[register-read.scala:34:7] wire io_iss_uops_0_prs2_busy_0 = io_iss_uops_0_prs2_busy; // @[register-read.scala:34:7] wire io_iss_uops_0_prs3_busy_0 = io_iss_uops_0_prs3_busy; // @[register-read.scala:34:7] wire io_iss_uops_0_ppred_busy_0 = io_iss_uops_0_ppred_busy; // @[register-read.scala:34:7] wire [5:0] io_iss_uops_0_stale_pdst_0 = io_iss_uops_0_stale_pdst; // @[register-read.scala:34:7] wire io_iss_uops_0_exception_0 = io_iss_uops_0_exception; // @[register-read.scala:34:7] wire [63:0] io_iss_uops_0_exc_cause_0 = io_iss_uops_0_exc_cause; // @[register-read.scala:34:7] wire io_iss_uops_0_bypassable_0 = io_iss_uops_0_bypassable; // @[register-read.scala:34:7] wire [4:0] io_iss_uops_0_mem_cmd_0 = io_iss_uops_0_mem_cmd; // @[register-read.scala:34:7] wire [1:0] io_iss_uops_0_mem_size_0 = io_iss_uops_0_mem_size; // @[register-read.scala:34:7] wire io_iss_uops_0_mem_signed_0 = io_iss_uops_0_mem_signed; // @[register-read.scala:34:7] wire io_iss_uops_0_is_fence_0 = io_iss_uops_0_is_fence; // @[register-read.scala:34:7] wire io_iss_uops_0_is_fencei_0 = io_iss_uops_0_is_fencei; // @[register-read.scala:34:7] wire io_iss_uops_0_is_amo_0 = io_iss_uops_0_is_amo; // @[register-read.scala:34:7] wire io_iss_uops_0_uses_ldq_0 = io_iss_uops_0_uses_ldq; // @[register-read.scala:34:7] wire io_iss_uops_0_uses_stq_0 = io_iss_uops_0_uses_stq; // @[register-read.scala:34:7] wire io_iss_uops_0_is_sys_pc2epc_0 = io_iss_uops_0_is_sys_pc2epc; // @[register-read.scala:34:7] wire io_iss_uops_0_is_unique_0 = io_iss_uops_0_is_unique; // @[register-read.scala:34:7] wire io_iss_uops_0_flush_on_commit_0 = io_iss_uops_0_flush_on_commit; // @[register-read.scala:34:7] wire io_iss_uops_0_ldst_is_rs1_0 = io_iss_uops_0_ldst_is_rs1; // @[register-read.scala:34:7] wire [5:0] io_iss_uops_0_ldst_0 = io_iss_uops_0_ldst; // @[register-read.scala:34:7] wire [5:0] io_iss_uops_0_lrs1_0 = io_iss_uops_0_lrs1; // @[register-read.scala:34:7] wire [5:0] io_iss_uops_0_lrs2_0 = io_iss_uops_0_lrs2; // @[register-read.scala:34:7] wire [5:0] io_iss_uops_0_lrs3_0 = io_iss_uops_0_lrs3; // @[register-read.scala:34:7] wire io_iss_uops_0_ldst_val_0 = io_iss_uops_0_ldst_val; // @[register-read.scala:34:7] wire [1:0] io_iss_uops_0_dst_rtype_0 = io_iss_uops_0_dst_rtype; // @[register-read.scala:34:7] wire [1:0] io_iss_uops_0_lrs1_rtype_0 = io_iss_uops_0_lrs1_rtype; // @[register-read.scala:34:7] wire [1:0] io_iss_uops_0_lrs2_rtype_0 = io_iss_uops_0_lrs2_rtype; // @[register-read.scala:34:7] wire io_iss_uops_0_frs3_en_0 = io_iss_uops_0_frs3_en; // @[register-read.scala:34:7] wire io_iss_uops_0_fp_val_0 = io_iss_uops_0_fp_val; // @[register-read.scala:34:7] wire io_iss_uops_0_fp_single_0 = io_iss_uops_0_fp_single; // @[register-read.scala:34:7] wire io_iss_uops_0_xcpt_pf_if_0 = io_iss_uops_0_xcpt_pf_if; // 
@[register-read.scala:34:7] wire io_iss_uops_0_xcpt_ae_if_0 = io_iss_uops_0_xcpt_ae_if; // @[register-read.scala:34:7] wire io_iss_uops_0_xcpt_ma_if_0 = io_iss_uops_0_xcpt_ma_if; // @[register-read.scala:34:7] wire io_iss_uops_0_bp_debug_if_0 = io_iss_uops_0_bp_debug_if; // @[register-read.scala:34:7] wire io_iss_uops_0_bp_xcpt_if_0 = io_iss_uops_0_bp_xcpt_if; // @[register-read.scala:34:7] wire [1:0] io_iss_uops_0_debug_fsrc_0 = io_iss_uops_0_debug_fsrc; // @[register-read.scala:34:7] wire [1:0] io_iss_uops_0_debug_tsrc_0 = io_iss_uops_0_debug_tsrc; // @[register-read.scala:34:7] wire [64:0] io_rf_read_ports_0_data_0 = io_rf_read_ports_0_data; // @[register-read.scala:34:7] wire [64:0] io_rf_read_ports_1_data_0 = io_rf_read_ports_1_data; // @[register-read.scala:34:7] wire [64:0] io_rf_read_ports_2_data_0 = io_rf_read_ports_2_data; // @[register-read.scala:34:7] wire io_kill_0 = io_kill; // @[register-read.scala:34:7] wire [7:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[register-read.scala:34:7] wire [7:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[register-read.scala:34:7] wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[register-read.scala:34:7] wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[register-read.scala:34:7] wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[register-read.scala:34:7] wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[register-read.scala:34:7] wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[register-read.scala:34:7] wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[register-read.scala:34:7] wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[register-read.scala:34:7] wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[register-read.scala:34:7] wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[register-read.scala:34:7] wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[register-read.scala:34:7] wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[register-read.scala:34:7] wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[register-read.scala:34:7] wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_is_sfb_0 
= io_brupdate_b2_uop_is_sfb; // @[register-read.scala:34:7] wire [7:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[register-read.scala:34:7] wire [2:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[register-read.scala:34:7] wire [3:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[register-read.scala:34:7] wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[register-read.scala:34:7] wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[register-read.scala:34:7] wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[register-read.scala:34:7] wire [4:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[register-read.scala:34:7] wire [2:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[register-read.scala:34:7] wire [2:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[register-read.scala:34:7] wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[register-read.scala:34:7] wire [5:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[register-read.scala:34:7] wire [5:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[register-read.scala:34:7] wire [5:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[register-read.scala:34:7] wire [5:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[register-read.scala:34:7] wire [3:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[register-read.scala:34:7] wire [5:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[register-read.scala:34:7] wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[register-read.scala:34:7] wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[register-read.scala:34:7] wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[register-read.scala:34:7] wire 
io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[register-read.scala:34:7] wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[register-read.scala:34:7] wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[register-read.scala:34:7] wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[register-read.scala:34:7] wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[register-read.scala:34:7] wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[register-read.scala:34:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[register-read.scala:34:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[register-read.scala:34:7] wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[register-read.scala:34:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[register-read.scala:34:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[register-read.scala:34:7] wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[register-read.scala:34:7] wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[register-read.scala:34:7] wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[register-read.scala:34:7] wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[register-read.scala:34:7] wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[register-read.scala:34:7] wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[register-read.scala:34:7] wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[register-read.scala:34:7] wire [6:0] exe_reg_uops_0_uop_uopc = 7'h0; // @[consts.scala:269:19] wire [31:0] exe_reg_uops_0_uop_inst = 32'h0; // @[consts.scala:269:19] wire [31:0] exe_reg_uops_0_uop_debug_inst = 32'h0; // @[consts.scala:269:19] wire [39:0] exe_reg_uops_0_uop_debug_pc = 40'h0; // @[consts.scala:269:19] wire [9:0] exe_reg_uops_0_uop_fu_code = 10'h0; // @[consts.scala:269:19] wire [7:0] exe_reg_uops_0_uop_br_mask = 8'h0; // @[consts.scala:269:19] wire [19:0] exe_reg_uops_0_uop_imm_packed = 20'h0; // @[consts.scala:269:19] wire [11:0] exe_reg_uops_0_uop_csr_addr = 12'h0; // @[consts.scala:269:19] wire [63:0] exe_reg_uops_0_uop_exc_cause = 64'h0; // @[consts.scala:269:19] wire [5:0] exe_reg_uops_0_uop_pc_lob = 6'h0; // @[consts.scala:269:19] wire [5:0] exe_reg_uops_0_uop_pdst = 6'h0; // @[consts.scala:269:19] wire [5:0] exe_reg_uops_0_uop_prs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] 
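// The exe_reg_uops_0_uop_* / exe_reg_uops_0_cs_* wires below are tied to constant default
// values at elaboration (see the consts.scala source locators).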
exe_reg_uops_0_uop_prs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] exe_reg_uops_0_uop_prs3 = 6'h0; // @[consts.scala:269:19] wire [5:0] exe_reg_uops_0_uop_stale_pdst = 6'h0; // @[consts.scala:269:19] wire [5:0] exe_reg_uops_0_uop_ldst = 6'h0; // @[consts.scala:269:19] wire [5:0] exe_reg_uops_0_uop_lrs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] exe_reg_uops_0_uop_lrs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] exe_reg_uops_0_uop_lrs3 = 6'h0; // @[consts.scala:269:19] wire [1:0] exe_reg_uops_0_uop_dst_rtype = 2'h2; // @[consts.scala:269:19] wire [1:0] exe_reg_uops_0_uop_ctrl_op1_sel = 2'h0; // @[consts.scala:269:19] wire [1:0] exe_reg_uops_0_uop_iw_state = 2'h0; // @[consts.scala:269:19] wire [1:0] exe_reg_uops_0_uop_rxq_idx = 2'h0; // @[consts.scala:269:19] wire [1:0] exe_reg_uops_0_uop_mem_size = 2'h0; // @[consts.scala:269:19] wire [1:0] exe_reg_uops_0_uop_lrs1_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] exe_reg_uops_0_uop_lrs2_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] exe_reg_uops_0_uop_debug_fsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] exe_reg_uops_0_uop_debug_tsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] exe_reg_uops_0_cs_op1_sel = 2'h0; // @[consts.scala:279:18] wire [4:0] exe_reg_uops_0_uop_ctrl_op_fcn = 5'h0; // @[consts.scala:269:19] wire [4:0] exe_reg_uops_0_uop_rob_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] exe_reg_uops_0_uop_mem_cmd = 5'h0; // @[consts.scala:269:19] wire [4:0] exe_reg_uops_0_cs_op_fcn = 5'h0; // @[consts.scala:279:18] wire [2:0] exe_reg_uops_0_uop_iq_type = 3'h0; // @[consts.scala:269:19] wire [2:0] exe_reg_uops_0_uop_ctrl_op2_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] exe_reg_uops_0_uop_ctrl_imm_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] exe_reg_uops_0_uop_ctrl_csr_cmd = 3'h0; // @[consts.scala:269:19] wire [2:0] exe_reg_uops_0_uop_br_tag = 3'h0; // @[consts.scala:269:19] wire [2:0] exe_reg_uops_0_uop_ldq_idx = 3'h0; // @[consts.scala:269:19] wire [2:0] exe_reg_uops_0_uop_stq_idx = 3'h0; // @[consts.scala:269:19] wire [2:0] exe_reg_uops_0_cs_op2_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] exe_reg_uops_0_cs_imm_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] exe_reg_uops_0_cs_csr_cmd = 3'h0; // @[consts.scala:279:18] wire [3:0] io_prf_read_ports_0_addr = 4'h0; // @[register-read.scala:34:7] wire [3:0] exe_reg_uops_0_uop_ctrl_br_type = 4'h0; // @[consts.scala:269:19] wire [3:0] exe_reg_uops_0_uop_ftq_idx = 4'h0; // @[consts.scala:269:19] wire [3:0] exe_reg_uops_0_uop_ppred = 4'h0; // @[consts.scala:269:19] wire [3:0] exe_reg_uops_0_cs_br_type = 4'h0; // @[consts.scala:279:18] wire io_iss_uops_0_iw_p1_poisoned = 1'h0; // @[register-read.scala:34:7] wire io_iss_uops_0_iw_p2_poisoned = 1'h0; // @[register-read.scala:34:7] wire io_prf_read_ports_0_data = 1'h0; // @[register-read.scala:34:7] wire io_exe_reqs_0_ready = 1'h0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_pred_data = 1'h0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_kill = 1'h0; // @[register-read.scala:34:7] wire _exe_reg_valids_WIRE_0 = 1'h0; // @[register-read.scala:69:41] wire rrd_uops_0_newuop_iw_p1_poisoned = 1'h0; // @[util.scala:73:26] wire rrd_uops_0_newuop_iw_p2_poisoned = 1'h0; // @[util.scala:73:26] wire rrd_pred_data_0 = 1'h0; // @[register-read.scala:97:28] wire exe_reg_uops_0_uop_is_rvc = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_ctrl_fcn_dw = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_ctrl_is_load = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_ctrl_is_sta = 1'h0; // 
@[consts.scala:269:19] wire exe_reg_uops_0_uop_ctrl_is_std = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_iw_p1_poisoned = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_iw_p2_poisoned = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_is_br = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_is_jalr = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_is_jal = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_is_sfb = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_edge_inst = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_taken = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_prs1_busy = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_prs2_busy = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_prs3_busy = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_ppred_busy = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_exception = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_bypassable = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_mem_signed = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_is_fence = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_is_fencei = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_is_amo = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_uses_ldq = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_uses_stq = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_is_sys_pc2epc = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_is_unique = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_flush_on_commit = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_ldst_is_rs1 = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_ldst_val = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_frs3_en = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_fp_val = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_fp_single = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_xcpt_pf_if = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_xcpt_ae_if = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_xcpt_ma_if = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_bp_debug_if = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_uop_bp_xcpt_if = 1'h0; // @[consts.scala:269:19] wire exe_reg_uops_0_cs_fcn_dw = 1'h0; // @[consts.scala:279:18] wire exe_reg_uops_0_cs_is_load = 1'h0; // @[consts.scala:279:18] wire exe_reg_uops_0_cs_is_sta = 1'h0; // @[consts.scala:279:18] wire exe_reg_uops_0_cs_is_std = 1'h0; // @[consts.scala:279:18] wire bypassed_pred_data_0 = 1'h0; // @[register-read.scala:154:32] wire [5:0] io_rf_read_ports_0_addr_0 = io_iss_uops_0_prs1_0; // @[register-read.scala:34:7] wire [5:0] io_rf_read_ports_1_addr_0 = io_iss_uops_0_prs2_0; // @[register-read.scala:34:7] wire [5:0] io_rf_read_ports_2_addr_0 = io_iss_uops_0_prs3_0; // @[register-read.scala:34:7] wire [3:0] io_exe_reqs_0_bits_uop_ctrl_br_type_0; // @[register-read.scala:34:7] wire [1:0] io_exe_reqs_0_bits_uop_ctrl_op1_sel_0; // @[register-read.scala:34:7] wire [2:0] io_exe_reqs_0_bits_uop_ctrl_op2_sel_0; // @[register-read.scala:34:7] wire [2:0] io_exe_reqs_0_bits_uop_ctrl_imm_sel_0; // @[register-read.scala:34:7] wire [4:0] io_exe_reqs_0_bits_uop_ctrl_op_fcn_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_ctrl_fcn_dw_0; // @[register-read.scala:34:7] wire [2:0] io_exe_reqs_0_bits_uop_ctrl_csr_cmd_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_ctrl_is_load_0; // 
@[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_ctrl_is_sta_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_ctrl_is_std_0; // @[register-read.scala:34:7] wire [6:0] io_exe_reqs_0_bits_uop_uopc_0; // @[register-read.scala:34:7] wire [31:0] io_exe_reqs_0_bits_uop_inst_0; // @[register-read.scala:34:7] wire [31:0] io_exe_reqs_0_bits_uop_debug_inst_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_is_rvc_0; // @[register-read.scala:34:7] wire [39:0] io_exe_reqs_0_bits_uop_debug_pc_0; // @[register-read.scala:34:7] wire [2:0] io_exe_reqs_0_bits_uop_iq_type_0; // @[register-read.scala:34:7] wire [9:0] io_exe_reqs_0_bits_uop_fu_code_0; // @[register-read.scala:34:7] wire [1:0] io_exe_reqs_0_bits_uop_iw_state_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_iw_p1_poisoned_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_iw_p2_poisoned_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_is_br_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_is_jalr_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_is_jal_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_is_sfb_0; // @[register-read.scala:34:7] wire [7:0] io_exe_reqs_0_bits_uop_br_mask_0; // @[register-read.scala:34:7] wire [2:0] io_exe_reqs_0_bits_uop_br_tag_0; // @[register-read.scala:34:7] wire [3:0] io_exe_reqs_0_bits_uop_ftq_idx_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_edge_inst_0; // @[register-read.scala:34:7] wire [5:0] io_exe_reqs_0_bits_uop_pc_lob_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_taken_0; // @[register-read.scala:34:7] wire [19:0] io_exe_reqs_0_bits_uop_imm_packed_0; // @[register-read.scala:34:7] wire [11:0] io_exe_reqs_0_bits_uop_csr_addr_0; // @[register-read.scala:34:7] wire [4:0] io_exe_reqs_0_bits_uop_rob_idx_0; // @[register-read.scala:34:7] wire [2:0] io_exe_reqs_0_bits_uop_ldq_idx_0; // @[register-read.scala:34:7] wire [2:0] io_exe_reqs_0_bits_uop_stq_idx_0; // @[register-read.scala:34:7] wire [1:0] io_exe_reqs_0_bits_uop_rxq_idx_0; // @[register-read.scala:34:7] wire [5:0] io_exe_reqs_0_bits_uop_pdst_0; // @[register-read.scala:34:7] wire [5:0] io_exe_reqs_0_bits_uop_prs1_0; // @[register-read.scala:34:7] wire [5:0] io_exe_reqs_0_bits_uop_prs2_0; // @[register-read.scala:34:7] wire [5:0] io_exe_reqs_0_bits_uop_prs3_0; // @[register-read.scala:34:7] wire [3:0] io_exe_reqs_0_bits_uop_ppred_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_prs1_busy_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_prs2_busy_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_prs3_busy_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_ppred_busy_0; // @[register-read.scala:34:7] wire [5:0] io_exe_reqs_0_bits_uop_stale_pdst_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_exception_0; // @[register-read.scala:34:7] wire [63:0] io_exe_reqs_0_bits_uop_exc_cause_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_bypassable_0; // @[register-read.scala:34:7] wire [4:0] io_exe_reqs_0_bits_uop_mem_cmd_0; // @[register-read.scala:34:7] wire [1:0] io_exe_reqs_0_bits_uop_mem_size_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_mem_signed_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_is_fence_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_is_fencei_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_is_amo_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_uses_ldq_0; // 
@[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_uses_stq_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_is_sys_pc2epc_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_is_unique_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_flush_on_commit_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_ldst_is_rs1_0; // @[register-read.scala:34:7] wire [5:0] io_exe_reqs_0_bits_uop_ldst_0; // @[register-read.scala:34:7] wire [5:0] io_exe_reqs_0_bits_uop_lrs1_0; // @[register-read.scala:34:7] wire [5:0] io_exe_reqs_0_bits_uop_lrs2_0; // @[register-read.scala:34:7] wire [5:0] io_exe_reqs_0_bits_uop_lrs3_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_ldst_val_0; // @[register-read.scala:34:7] wire [1:0] io_exe_reqs_0_bits_uop_dst_rtype_0; // @[register-read.scala:34:7] wire [1:0] io_exe_reqs_0_bits_uop_lrs1_rtype_0; // @[register-read.scala:34:7] wire [1:0] io_exe_reqs_0_bits_uop_lrs2_rtype_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_frs3_en_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_fp_val_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_fp_single_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_xcpt_pf_if_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_xcpt_ae_if_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_xcpt_ma_if_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_bp_debug_if_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_bits_uop_bp_xcpt_if_0; // @[register-read.scala:34:7] wire [1:0] io_exe_reqs_0_bits_uop_debug_fsrc_0; // @[register-read.scala:34:7] wire [1:0] io_exe_reqs_0_bits_uop_debug_tsrc_0; // @[register-read.scala:34:7] wire [64:0] io_exe_reqs_0_bits_rs1_data_0; // @[register-read.scala:34:7] wire [64:0] io_exe_reqs_0_bits_rs2_data_0; // @[register-read.scala:34:7] wire [64:0] io_exe_reqs_0_bits_rs3_data_0; // @[register-read.scala:34:7] wire io_exe_reqs_0_valid_0; // @[register-read.scala:34:7] wire rrd_valids_0; // @[register-read.scala:66:30] wire [3:0] rrd_uops_0_ctrl_br_type; // @[register-read.scala:67:30] wire [1:0] rrd_uops_0_ctrl_op1_sel; // @[register-read.scala:67:30] wire [2:0] rrd_uops_0_ctrl_op2_sel; // @[register-read.scala:67:30] wire [2:0] rrd_uops_0_ctrl_imm_sel; // @[register-read.scala:67:30] wire [4:0] rrd_uops_0_ctrl_op_fcn; // @[register-read.scala:67:30] wire rrd_uops_0_ctrl_fcn_dw; // @[register-read.scala:67:30] wire [2:0] rrd_uops_0_ctrl_csr_cmd; // @[register-read.scala:67:30] wire rrd_uops_0_ctrl_is_load; // @[register-read.scala:67:30] wire rrd_uops_0_ctrl_is_sta; // @[register-read.scala:67:30] wire rrd_uops_0_ctrl_is_std; // @[register-read.scala:67:30] wire [6:0] rrd_uops_0_uopc; // @[register-read.scala:67:30] wire [31:0] rrd_uops_0_inst; // @[register-read.scala:67:30] wire [31:0] rrd_uops_0_debug_inst; // @[register-read.scala:67:30] wire rrd_uops_0_is_rvc; // @[register-read.scala:67:30] wire [39:0] rrd_uops_0_debug_pc; // @[register-read.scala:67:30] wire [2:0] rrd_uops_0_iq_type; // @[register-read.scala:67:30] wire [9:0] rrd_uops_0_fu_code; // @[register-read.scala:67:30] wire [1:0] rrd_uops_0_iw_state; // @[register-read.scala:67:30] wire rrd_uops_0_iw_p1_poisoned; // @[register-read.scala:67:30] wire rrd_uops_0_iw_p2_poisoned; // @[register-read.scala:67:30] wire rrd_uops_0_is_br; // @[register-read.scala:67:30] wire rrd_uops_0_is_jalr; // @[register-read.scala:67:30] wire rrd_uops_0_is_jal; // @[register-read.scala:67:30] wire rrd_uops_0_is_sfb; // 
@[register-read.scala:67:30] wire [7:0] rrd_uops_0_br_mask; // @[register-read.scala:67:30] wire [2:0] rrd_uops_0_br_tag; // @[register-read.scala:67:30] wire [3:0] rrd_uops_0_ftq_idx; // @[register-read.scala:67:30] wire rrd_uops_0_edge_inst; // @[register-read.scala:67:30] wire [5:0] rrd_uops_0_pc_lob; // @[register-read.scala:67:30] wire rrd_uops_0_taken; // @[register-read.scala:67:30] wire [19:0] rrd_uops_0_imm_packed; // @[register-read.scala:67:30] wire [11:0] rrd_uops_0_csr_addr; // @[register-read.scala:67:30] wire [4:0] rrd_uops_0_rob_idx; // @[register-read.scala:67:30] wire [2:0] rrd_uops_0_ldq_idx; // @[register-read.scala:67:30] wire [2:0] rrd_uops_0_stq_idx; // @[register-read.scala:67:30] wire [1:0] rrd_uops_0_rxq_idx; // @[register-read.scala:67:30] wire [5:0] rrd_uops_0_pdst; // @[register-read.scala:67:30] wire [5:0] rrd_uops_0_prs1; // @[register-read.scala:67:30] wire [5:0] rrd_uops_0_prs2; // @[register-read.scala:67:30] wire [5:0] rrd_uops_0_prs3; // @[register-read.scala:67:30] wire [3:0] rrd_uops_0_ppred; // @[register-read.scala:67:30] wire rrd_uops_0_prs1_busy; // @[register-read.scala:67:30] wire rrd_uops_0_prs2_busy; // @[register-read.scala:67:30] wire rrd_uops_0_prs3_busy; // @[register-read.scala:67:30] wire rrd_uops_0_ppred_busy; // @[register-read.scala:67:30] wire [5:0] rrd_uops_0_stale_pdst; // @[register-read.scala:67:30] wire rrd_uops_0_exception; // @[register-read.scala:67:30] wire [63:0] rrd_uops_0_exc_cause; // @[register-read.scala:67:30] wire rrd_uops_0_bypassable; // @[register-read.scala:67:30] wire [4:0] rrd_uops_0_mem_cmd; // @[register-read.scala:67:30] wire [1:0] rrd_uops_0_mem_size; // @[register-read.scala:67:30] wire rrd_uops_0_mem_signed; // @[register-read.scala:67:30] wire rrd_uops_0_is_fence; // @[register-read.scala:67:30] wire rrd_uops_0_is_fencei; // @[register-read.scala:67:30] wire rrd_uops_0_is_amo; // @[register-read.scala:67:30] wire rrd_uops_0_uses_ldq; // @[register-read.scala:67:30] wire rrd_uops_0_uses_stq; // @[register-read.scala:67:30] wire rrd_uops_0_is_sys_pc2epc; // @[register-read.scala:67:30] wire rrd_uops_0_is_unique; // @[register-read.scala:67:30] wire rrd_uops_0_flush_on_commit; // @[register-read.scala:67:30] wire rrd_uops_0_ldst_is_rs1; // @[register-read.scala:67:30] wire [5:0] rrd_uops_0_ldst; // @[register-read.scala:67:30] wire [5:0] rrd_uops_0_lrs1; // @[register-read.scala:67:30] wire [5:0] rrd_uops_0_lrs2; // @[register-read.scala:67:30] wire [5:0] rrd_uops_0_lrs3; // @[register-read.scala:67:30] wire rrd_uops_0_ldst_val; // @[register-read.scala:67:30] wire [1:0] rrd_uops_0_dst_rtype; // @[register-read.scala:67:30] wire [1:0] rrd_uops_0_lrs1_rtype; // @[register-read.scala:67:30] wire [1:0] rrd_uops_0_lrs2_rtype; // @[register-read.scala:67:30] wire rrd_uops_0_frs3_en; // @[register-read.scala:67:30] wire rrd_uops_0_fp_val; // @[register-read.scala:67:30] wire rrd_uops_0_fp_single; // @[register-read.scala:67:30] wire rrd_uops_0_xcpt_pf_if; // @[register-read.scala:67:30] wire rrd_uops_0_xcpt_ae_if; // @[register-read.scala:67:30] wire rrd_uops_0_xcpt_ma_if; // @[register-read.scala:67:30] wire rrd_uops_0_bp_debug_if; // @[register-read.scala:67:30] wire rrd_uops_0_bp_xcpt_if; // @[register-read.scala:67:30] wire [1:0] rrd_uops_0_debug_fsrc; // @[register-read.scala:67:30] wire [1:0] rrd_uops_0_debug_tsrc; // @[register-read.scala:67:30] reg exe_reg_valids_0; // @[register-read.scala:69:33] assign io_exe_reqs_0_valid_0 = exe_reg_valids_0; // @[register-read.scala:34:7, :69:33] reg [6:0] 
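// Execute-stage pipeline registers: each exe_reg_uops_0_* register directly drives the
// corresponding io_exe_reqs_0_bits_uop_* output.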
exe_reg_uops_0_uopc; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_uopc_0 = exe_reg_uops_0_uopc; // @[register-read.scala:34:7, :70:29] reg [31:0] exe_reg_uops_0_inst; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_inst_0 = exe_reg_uops_0_inst; // @[register-read.scala:34:7, :70:29] reg [31:0] exe_reg_uops_0_debug_inst; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_debug_inst_0 = exe_reg_uops_0_debug_inst; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_is_rvc; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_is_rvc_0 = exe_reg_uops_0_is_rvc; // @[register-read.scala:34:7, :70:29] reg [39:0] exe_reg_uops_0_debug_pc; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_debug_pc_0 = exe_reg_uops_0_debug_pc; // @[register-read.scala:34:7, :70:29] reg [2:0] exe_reg_uops_0_iq_type; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_iq_type_0 = exe_reg_uops_0_iq_type; // @[register-read.scala:34:7, :70:29] reg [9:0] exe_reg_uops_0_fu_code; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_fu_code_0 = exe_reg_uops_0_fu_code; // @[register-read.scala:34:7, :70:29] reg [3:0] exe_reg_uops_0_ctrl_br_type; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ctrl_br_type_0 = exe_reg_uops_0_ctrl_br_type; // @[register-read.scala:34:7, :70:29] reg [1:0] exe_reg_uops_0_ctrl_op1_sel; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ctrl_op1_sel_0 = exe_reg_uops_0_ctrl_op1_sel; // @[register-read.scala:34:7, :70:29] reg [2:0] exe_reg_uops_0_ctrl_op2_sel; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ctrl_op2_sel_0 = exe_reg_uops_0_ctrl_op2_sel; // @[register-read.scala:34:7, :70:29] reg [2:0] exe_reg_uops_0_ctrl_imm_sel; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ctrl_imm_sel_0 = exe_reg_uops_0_ctrl_imm_sel; // @[register-read.scala:34:7, :70:29] reg [4:0] exe_reg_uops_0_ctrl_op_fcn; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ctrl_op_fcn_0 = exe_reg_uops_0_ctrl_op_fcn; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_ctrl_fcn_dw; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ctrl_fcn_dw_0 = exe_reg_uops_0_ctrl_fcn_dw; // @[register-read.scala:34:7, :70:29] reg [2:0] exe_reg_uops_0_ctrl_csr_cmd; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ctrl_csr_cmd_0 = exe_reg_uops_0_ctrl_csr_cmd; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_ctrl_is_load; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ctrl_is_load_0 = exe_reg_uops_0_ctrl_is_load; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_ctrl_is_sta; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ctrl_is_sta_0 = exe_reg_uops_0_ctrl_is_sta; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_ctrl_is_std; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ctrl_is_std_0 = exe_reg_uops_0_ctrl_is_std; // @[register-read.scala:34:7, :70:29] reg [1:0] exe_reg_uops_0_iw_state; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_iw_state_0 = exe_reg_uops_0_iw_state; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_iw_p1_poisoned; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_iw_p1_poisoned_0 = exe_reg_uops_0_iw_p1_poisoned; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_iw_p2_poisoned; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_iw_p2_poisoned_0 = exe_reg_uops_0_iw_p2_poisoned; // @[register-read.scala:34:7, :70:29] reg 
exe_reg_uops_0_is_br; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_is_br_0 = exe_reg_uops_0_is_br; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_is_jalr; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_is_jalr_0 = exe_reg_uops_0_is_jalr; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_is_jal; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_is_jal_0 = exe_reg_uops_0_is_jal; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_is_sfb; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_is_sfb_0 = exe_reg_uops_0_is_sfb; // @[register-read.scala:34:7, :70:29] reg [7:0] exe_reg_uops_0_br_mask; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_br_mask_0 = exe_reg_uops_0_br_mask; // @[register-read.scala:34:7, :70:29] reg [2:0] exe_reg_uops_0_br_tag; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_br_tag_0 = exe_reg_uops_0_br_tag; // @[register-read.scala:34:7, :70:29] reg [3:0] exe_reg_uops_0_ftq_idx; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ftq_idx_0 = exe_reg_uops_0_ftq_idx; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_edge_inst; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_edge_inst_0 = exe_reg_uops_0_edge_inst; // @[register-read.scala:34:7, :70:29] reg [5:0] exe_reg_uops_0_pc_lob; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_pc_lob_0 = exe_reg_uops_0_pc_lob; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_taken; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_taken_0 = exe_reg_uops_0_taken; // @[register-read.scala:34:7, :70:29] reg [19:0] exe_reg_uops_0_imm_packed; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_imm_packed_0 = exe_reg_uops_0_imm_packed; // @[register-read.scala:34:7, :70:29] reg [11:0] exe_reg_uops_0_csr_addr; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_csr_addr_0 = exe_reg_uops_0_csr_addr; // @[register-read.scala:34:7, :70:29] reg [4:0] exe_reg_uops_0_rob_idx; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_rob_idx_0 = exe_reg_uops_0_rob_idx; // @[register-read.scala:34:7, :70:29] reg [2:0] exe_reg_uops_0_ldq_idx; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ldq_idx_0 = exe_reg_uops_0_ldq_idx; // @[register-read.scala:34:7, :70:29] reg [2:0] exe_reg_uops_0_stq_idx; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_stq_idx_0 = exe_reg_uops_0_stq_idx; // @[register-read.scala:34:7, :70:29] reg [1:0] exe_reg_uops_0_rxq_idx; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_rxq_idx_0 = exe_reg_uops_0_rxq_idx; // @[register-read.scala:34:7, :70:29] reg [5:0] exe_reg_uops_0_pdst; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_pdst_0 = exe_reg_uops_0_pdst; // @[register-read.scala:34:7, :70:29] reg [5:0] exe_reg_uops_0_prs1; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_prs1_0 = exe_reg_uops_0_prs1; // @[register-read.scala:34:7, :70:29] reg [5:0] exe_reg_uops_0_prs2; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_prs2_0 = exe_reg_uops_0_prs2; // @[register-read.scala:34:7, :70:29] reg [5:0] exe_reg_uops_0_prs3; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_prs3_0 = exe_reg_uops_0_prs3; // @[register-read.scala:34:7, :70:29] reg [3:0] exe_reg_uops_0_ppred; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ppred_0 = exe_reg_uops_0_ppred; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_prs1_busy; // 
@[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_prs1_busy_0 = exe_reg_uops_0_prs1_busy; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_prs2_busy; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_prs2_busy_0 = exe_reg_uops_0_prs2_busy; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_prs3_busy; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_prs3_busy_0 = exe_reg_uops_0_prs3_busy; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_ppred_busy; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ppred_busy_0 = exe_reg_uops_0_ppred_busy; // @[register-read.scala:34:7, :70:29] reg [5:0] exe_reg_uops_0_stale_pdst; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_stale_pdst_0 = exe_reg_uops_0_stale_pdst; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_exception; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_exception_0 = exe_reg_uops_0_exception; // @[register-read.scala:34:7, :70:29] reg [63:0] exe_reg_uops_0_exc_cause; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_exc_cause_0 = exe_reg_uops_0_exc_cause; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_bypassable; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_bypassable_0 = exe_reg_uops_0_bypassable; // @[register-read.scala:34:7, :70:29] reg [4:0] exe_reg_uops_0_mem_cmd; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_mem_cmd_0 = exe_reg_uops_0_mem_cmd; // @[register-read.scala:34:7, :70:29] reg [1:0] exe_reg_uops_0_mem_size; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_mem_size_0 = exe_reg_uops_0_mem_size; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_mem_signed; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_mem_signed_0 = exe_reg_uops_0_mem_signed; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_is_fence; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_is_fence_0 = exe_reg_uops_0_is_fence; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_is_fencei; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_is_fencei_0 = exe_reg_uops_0_is_fencei; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_is_amo; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_is_amo_0 = exe_reg_uops_0_is_amo; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_uses_ldq; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_uses_ldq_0 = exe_reg_uops_0_uses_ldq; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_uses_stq; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_uses_stq_0 = exe_reg_uops_0_uses_stq; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_is_sys_pc2epc; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_is_sys_pc2epc_0 = exe_reg_uops_0_is_sys_pc2epc; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_is_unique; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_is_unique_0 = exe_reg_uops_0_is_unique; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_flush_on_commit; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_flush_on_commit_0 = exe_reg_uops_0_flush_on_commit; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_ldst_is_rs1; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ldst_is_rs1_0 = exe_reg_uops_0_ldst_is_rs1; // @[register-read.scala:34:7, :70:29] reg [5:0] exe_reg_uops_0_ldst; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ldst_0 = 
exe_reg_uops_0_ldst; // @[register-read.scala:34:7, :70:29] reg [5:0] exe_reg_uops_0_lrs1; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_lrs1_0 = exe_reg_uops_0_lrs1; // @[register-read.scala:34:7, :70:29] reg [5:0] exe_reg_uops_0_lrs2; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_lrs2_0 = exe_reg_uops_0_lrs2; // @[register-read.scala:34:7, :70:29] reg [5:0] exe_reg_uops_0_lrs3; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_lrs3_0 = exe_reg_uops_0_lrs3; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_ldst_val; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_ldst_val_0 = exe_reg_uops_0_ldst_val; // @[register-read.scala:34:7, :70:29] reg [1:0] exe_reg_uops_0_dst_rtype; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_dst_rtype_0 = exe_reg_uops_0_dst_rtype; // @[register-read.scala:34:7, :70:29] reg [1:0] exe_reg_uops_0_lrs1_rtype; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_lrs1_rtype_0 = exe_reg_uops_0_lrs1_rtype; // @[register-read.scala:34:7, :70:29] reg [1:0] exe_reg_uops_0_lrs2_rtype; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_lrs2_rtype_0 = exe_reg_uops_0_lrs2_rtype; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_frs3_en; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_frs3_en_0 = exe_reg_uops_0_frs3_en; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_fp_val; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_fp_val_0 = exe_reg_uops_0_fp_val; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_fp_single; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_fp_single_0 = exe_reg_uops_0_fp_single; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_xcpt_pf_if; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_xcpt_pf_if_0 = exe_reg_uops_0_xcpt_pf_if; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_xcpt_ae_if; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_xcpt_ae_if_0 = exe_reg_uops_0_xcpt_ae_if; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_xcpt_ma_if; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_xcpt_ma_if_0 = exe_reg_uops_0_xcpt_ma_if; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_bp_debug_if; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_bp_debug_if_0 = exe_reg_uops_0_bp_debug_if; // @[register-read.scala:34:7, :70:29] reg exe_reg_uops_0_bp_xcpt_if; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_bp_xcpt_if_0 = exe_reg_uops_0_bp_xcpt_if; // @[register-read.scala:34:7, :70:29] reg [1:0] exe_reg_uops_0_debug_fsrc; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_debug_fsrc_0 = exe_reg_uops_0_debug_fsrc; // @[register-read.scala:34:7, :70:29] reg [1:0] exe_reg_uops_0_debug_tsrc; // @[register-read.scala:70:29] assign io_exe_reqs_0_bits_uop_debug_tsrc_0 = exe_reg_uops_0_debug_tsrc; // @[register-read.scala:34:7, :70:29] reg [64:0] exe_reg_rs1_data_0; // @[register-read.scala:71:29] assign io_exe_reqs_0_bits_rs1_data_0 = exe_reg_rs1_data_0; // @[register-read.scala:34:7, :71:29] reg [64:0] exe_reg_rs2_data_0; // @[register-read.scala:72:29] assign io_exe_reqs_0_bits_rs2_data_0 = exe_reg_rs2_data_0; // @[register-read.scala:34:7, :72:29] reg [64:0] exe_reg_rs3_data_0; // @[register-read.scala:73:29] assign io_exe_reqs_0_bits_rs3_data_0 = exe_reg_rs3_data_0; // @[register-read.scala:34:7, :73:29] wire [7:0] _rrd_valids_0_T = io_brupdate_b1_mispredict_mask_0 & 
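// Branch-kill check: the decoded uop is dropped when any bit of its branch mask matches a
// branch that just resolved as mispredicted (io_brupdate_b1_mispredict_mask).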
_rrd_decode_unit_io_rrd_uop_br_mask; // @[util.scala:118:51] wire _rrd_valids_0_T_1 = |_rrd_valids_0_T; // @[util.scala:118:{51,59}] wire _rrd_valids_0_T_2 = ~_rrd_valids_0_T_1; // @[util.scala:118:59] wire _rrd_valids_0_T_3 = _rrd_decode_unit_io_rrd_valid & _rrd_valids_0_T_2; // @[register-read.scala:80:33, :84:59, :85:17] reg rrd_valids_0_REG; // @[register-read.scala:84:29] assign rrd_valids_0 = rrd_valids_0_REG; // @[register-read.scala:66:30, :84:29] wire [7:0] _rrd_uops_0_newuop_br_mask_T_1; // @[util.scala:74:35] wire [3:0] rrd_uops_0_newuop_ctrl_br_type; // @[util.scala:73:26] wire [1:0] rrd_uops_0_newuop_ctrl_op1_sel; // @[util.scala:73:26] wire [2:0] rrd_uops_0_newuop_ctrl_op2_sel; // @[util.scala:73:26] wire [2:0] rrd_uops_0_newuop_ctrl_imm_sel; // @[util.scala:73:26] wire [4:0] rrd_uops_0_newuop_ctrl_op_fcn; // @[util.scala:73:26] wire rrd_uops_0_newuop_ctrl_fcn_dw; // @[util.scala:73:26] wire [2:0] rrd_uops_0_newuop_ctrl_csr_cmd; // @[util.scala:73:26] wire rrd_uops_0_newuop_ctrl_is_load; // @[util.scala:73:26] wire rrd_uops_0_newuop_ctrl_is_sta; // @[util.scala:73:26] wire rrd_uops_0_newuop_ctrl_is_std; // @[util.scala:73:26] wire [6:0] rrd_uops_0_newuop_uopc; // @[util.scala:73:26] wire [31:0] rrd_uops_0_newuop_inst; // @[util.scala:73:26] wire [31:0] rrd_uops_0_newuop_debug_inst; // @[util.scala:73:26] wire rrd_uops_0_newuop_is_rvc; // @[util.scala:73:26] wire [39:0] rrd_uops_0_newuop_debug_pc; // @[util.scala:73:26] wire [2:0] rrd_uops_0_newuop_iq_type; // @[util.scala:73:26] wire [9:0] rrd_uops_0_newuop_fu_code; // @[util.scala:73:26] wire [1:0] rrd_uops_0_newuop_iw_state; // @[util.scala:73:26] wire rrd_uops_0_newuop_is_br; // @[util.scala:73:26] wire rrd_uops_0_newuop_is_jalr; // @[util.scala:73:26] wire rrd_uops_0_newuop_is_jal; // @[util.scala:73:26] wire rrd_uops_0_newuop_is_sfb; // @[util.scala:73:26] wire [7:0] rrd_uops_0_newuop_br_mask; // @[util.scala:73:26] wire [2:0] rrd_uops_0_newuop_br_tag; // @[util.scala:73:26] wire [3:0] rrd_uops_0_newuop_ftq_idx; // @[util.scala:73:26] wire rrd_uops_0_newuop_edge_inst; // @[util.scala:73:26] wire [5:0] rrd_uops_0_newuop_pc_lob; // @[util.scala:73:26] wire rrd_uops_0_newuop_taken; // @[util.scala:73:26] wire [19:0] rrd_uops_0_newuop_imm_packed; // @[util.scala:73:26] wire [11:0] rrd_uops_0_newuop_csr_addr; // @[util.scala:73:26] wire [4:0] rrd_uops_0_newuop_rob_idx; // @[util.scala:73:26] wire [2:0] rrd_uops_0_newuop_ldq_idx; // @[util.scala:73:26] wire [2:0] rrd_uops_0_newuop_stq_idx; // @[util.scala:73:26] wire [1:0] rrd_uops_0_newuop_rxq_idx; // @[util.scala:73:26] wire [5:0] rrd_uops_0_newuop_pdst; // @[util.scala:73:26] wire [5:0] rrd_uops_0_newuop_prs1; // @[util.scala:73:26] wire [5:0] rrd_uops_0_newuop_prs2; // @[util.scala:73:26] wire [5:0] rrd_uops_0_newuop_prs3; // @[util.scala:73:26] wire [3:0] rrd_uops_0_newuop_ppred; // @[util.scala:73:26] wire rrd_uops_0_newuop_prs1_busy; // @[util.scala:73:26] wire rrd_uops_0_newuop_prs2_busy; // @[util.scala:73:26] wire rrd_uops_0_newuop_prs3_busy; // @[util.scala:73:26] wire rrd_uops_0_newuop_ppred_busy; // @[util.scala:73:26] wire [5:0] rrd_uops_0_newuop_stale_pdst; // @[util.scala:73:26] wire rrd_uops_0_newuop_exception; // @[util.scala:73:26] wire [63:0] rrd_uops_0_newuop_exc_cause; // @[util.scala:73:26] wire rrd_uops_0_newuop_bypassable; // @[util.scala:73:26] wire [4:0] rrd_uops_0_newuop_mem_cmd; // @[util.scala:73:26] wire [1:0] rrd_uops_0_newuop_mem_size; // @[util.scala:73:26] wire rrd_uops_0_newuop_mem_signed; // @[util.scala:73:26] wire 
rrd_uops_0_newuop_is_fence; // @[util.scala:73:26] wire rrd_uops_0_newuop_is_fencei; // @[util.scala:73:26] wire rrd_uops_0_newuop_is_amo; // @[util.scala:73:26] wire rrd_uops_0_newuop_uses_ldq; // @[util.scala:73:26] wire rrd_uops_0_newuop_uses_stq; // @[util.scala:73:26] wire rrd_uops_0_newuop_is_sys_pc2epc; // @[util.scala:73:26] wire rrd_uops_0_newuop_is_unique; // @[util.scala:73:26] wire rrd_uops_0_newuop_flush_on_commit; // @[util.scala:73:26] wire rrd_uops_0_newuop_ldst_is_rs1; // @[util.scala:73:26] wire [5:0] rrd_uops_0_newuop_ldst; // @[util.scala:73:26] wire [5:0] rrd_uops_0_newuop_lrs1; // @[util.scala:73:26] wire [5:0] rrd_uops_0_newuop_lrs2; // @[util.scala:73:26] wire [5:0] rrd_uops_0_newuop_lrs3; // @[util.scala:73:26] wire rrd_uops_0_newuop_ldst_val; // @[util.scala:73:26] wire [1:0] rrd_uops_0_newuop_dst_rtype; // @[util.scala:73:26] wire [1:0] rrd_uops_0_newuop_lrs1_rtype; // @[util.scala:73:26] wire [1:0] rrd_uops_0_newuop_lrs2_rtype; // @[util.scala:73:26] wire rrd_uops_0_newuop_frs3_en; // @[util.scala:73:26] wire rrd_uops_0_newuop_fp_val; // @[util.scala:73:26] wire rrd_uops_0_newuop_fp_single; // @[util.scala:73:26] wire rrd_uops_0_newuop_xcpt_pf_if; // @[util.scala:73:26] wire rrd_uops_0_newuop_xcpt_ae_if; // @[util.scala:73:26] wire rrd_uops_0_newuop_xcpt_ma_if; // @[util.scala:73:26] wire rrd_uops_0_newuop_bp_debug_if; // @[util.scala:73:26] wire rrd_uops_0_newuop_bp_xcpt_if; // @[util.scala:73:26] wire [1:0] rrd_uops_0_newuop_debug_fsrc; // @[util.scala:73:26] wire [1:0] rrd_uops_0_newuop_debug_tsrc; // @[util.scala:73:26] wire [7:0] _rrd_uops_0_newuop_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:74:37] assign _rrd_uops_0_newuop_br_mask_T_1 = _rrd_decode_unit_io_rrd_uop_br_mask & _rrd_uops_0_newuop_br_mask_T; // @[util.scala:74:{35,37}] assign rrd_uops_0_newuop_br_mask = _rrd_uops_0_newuop_br_mask_T_1; // @[util.scala:73:26, :74:35] reg [6:0] rrd_uops_0_REG_uopc; // @[register-read.scala:86:29] assign rrd_uops_0_uopc = rrd_uops_0_REG_uopc; // @[register-read.scala:67:30, :86:29] reg [31:0] rrd_uops_0_REG_inst; // @[register-read.scala:86:29] assign rrd_uops_0_inst = rrd_uops_0_REG_inst; // @[register-read.scala:67:30, :86:29] reg [31:0] rrd_uops_0_REG_debug_inst; // @[register-read.scala:86:29] assign rrd_uops_0_debug_inst = rrd_uops_0_REG_debug_inst; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_is_rvc; // @[register-read.scala:86:29] assign rrd_uops_0_is_rvc = rrd_uops_0_REG_is_rvc; // @[register-read.scala:67:30, :86:29] reg [39:0] rrd_uops_0_REG_debug_pc; // @[register-read.scala:86:29] assign rrd_uops_0_debug_pc = rrd_uops_0_REG_debug_pc; // @[register-read.scala:67:30, :86:29] reg [2:0] rrd_uops_0_REG_iq_type; // @[register-read.scala:86:29] assign rrd_uops_0_iq_type = rrd_uops_0_REG_iq_type; // @[register-read.scala:67:30, :86:29] reg [9:0] rrd_uops_0_REG_fu_code; // @[register-read.scala:86:29] assign rrd_uops_0_fu_code = rrd_uops_0_REG_fu_code; // @[register-read.scala:67:30, :86:29] reg [3:0] rrd_uops_0_REG_ctrl_br_type; // @[register-read.scala:86:29] assign rrd_uops_0_ctrl_br_type = rrd_uops_0_REG_ctrl_br_type; // @[register-read.scala:67:30, :86:29] reg [1:0] rrd_uops_0_REG_ctrl_op1_sel; // @[register-read.scala:86:29] assign rrd_uops_0_ctrl_op1_sel = rrd_uops_0_REG_ctrl_op1_sel; // @[register-read.scala:67:30, :86:29] reg [2:0] rrd_uops_0_REG_ctrl_op2_sel; // @[register-read.scala:86:29] assign rrd_uops_0_ctrl_op2_sel = rrd_uops_0_REG_ctrl_op2_sel; // @[register-read.scala:67:30, :86:29] reg [2:0] 
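// rrd_uops_0_REG_* registers capture the decoded uop one cycle later and drive the
// rrd_uops_0_* wires of the register-read stage.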
rrd_uops_0_REG_ctrl_imm_sel; // @[register-read.scala:86:29] assign rrd_uops_0_ctrl_imm_sel = rrd_uops_0_REG_ctrl_imm_sel; // @[register-read.scala:67:30, :86:29] reg [4:0] rrd_uops_0_REG_ctrl_op_fcn; // @[register-read.scala:86:29] assign rrd_uops_0_ctrl_op_fcn = rrd_uops_0_REG_ctrl_op_fcn; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_ctrl_fcn_dw; // @[register-read.scala:86:29] assign rrd_uops_0_ctrl_fcn_dw = rrd_uops_0_REG_ctrl_fcn_dw; // @[register-read.scala:67:30, :86:29] reg [2:0] rrd_uops_0_REG_ctrl_csr_cmd; // @[register-read.scala:86:29] assign rrd_uops_0_ctrl_csr_cmd = rrd_uops_0_REG_ctrl_csr_cmd; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_ctrl_is_load; // @[register-read.scala:86:29] assign rrd_uops_0_ctrl_is_load = rrd_uops_0_REG_ctrl_is_load; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_ctrl_is_sta; // @[register-read.scala:86:29] assign rrd_uops_0_ctrl_is_sta = rrd_uops_0_REG_ctrl_is_sta; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_ctrl_is_std; // @[register-read.scala:86:29] assign rrd_uops_0_ctrl_is_std = rrd_uops_0_REG_ctrl_is_std; // @[register-read.scala:67:30, :86:29] reg [1:0] rrd_uops_0_REG_iw_state; // @[register-read.scala:86:29] assign rrd_uops_0_iw_state = rrd_uops_0_REG_iw_state; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_iw_p1_poisoned; // @[register-read.scala:86:29] assign rrd_uops_0_iw_p1_poisoned = rrd_uops_0_REG_iw_p1_poisoned; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_iw_p2_poisoned; // @[register-read.scala:86:29] assign rrd_uops_0_iw_p2_poisoned = rrd_uops_0_REG_iw_p2_poisoned; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_is_br; // @[register-read.scala:86:29] assign rrd_uops_0_is_br = rrd_uops_0_REG_is_br; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_is_jalr; // @[register-read.scala:86:29] assign rrd_uops_0_is_jalr = rrd_uops_0_REG_is_jalr; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_is_jal; // @[register-read.scala:86:29] assign rrd_uops_0_is_jal = rrd_uops_0_REG_is_jal; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_is_sfb; // @[register-read.scala:86:29] assign rrd_uops_0_is_sfb = rrd_uops_0_REG_is_sfb; // @[register-read.scala:67:30, :86:29] reg [7:0] rrd_uops_0_REG_br_mask; // @[register-read.scala:86:29] assign rrd_uops_0_br_mask = rrd_uops_0_REG_br_mask; // @[register-read.scala:67:30, :86:29] reg [2:0] rrd_uops_0_REG_br_tag; // @[register-read.scala:86:29] assign rrd_uops_0_br_tag = rrd_uops_0_REG_br_tag; // @[register-read.scala:67:30, :86:29] reg [3:0] rrd_uops_0_REG_ftq_idx; // @[register-read.scala:86:29] assign rrd_uops_0_ftq_idx = rrd_uops_0_REG_ftq_idx; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_edge_inst; // @[register-read.scala:86:29] assign rrd_uops_0_edge_inst = rrd_uops_0_REG_edge_inst; // @[register-read.scala:67:30, :86:29] reg [5:0] rrd_uops_0_REG_pc_lob; // @[register-read.scala:86:29] assign rrd_uops_0_pc_lob = rrd_uops_0_REG_pc_lob; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_taken; // @[register-read.scala:86:29] assign rrd_uops_0_taken = rrd_uops_0_REG_taken; // @[register-read.scala:67:30, :86:29] reg [19:0] rrd_uops_0_REG_imm_packed; // @[register-read.scala:86:29] assign rrd_uops_0_imm_packed = rrd_uops_0_REG_imm_packed; // @[register-read.scala:67:30, :86:29] reg [11:0] rrd_uops_0_REG_csr_addr; // @[register-read.scala:86:29] assign rrd_uops_0_csr_addr = rrd_uops_0_REG_csr_addr; // @[register-read.scala:67:30, :86:29] reg [4:0] 
rrd_uops_0_REG_rob_idx; // @[register-read.scala:86:29] assign rrd_uops_0_rob_idx = rrd_uops_0_REG_rob_idx; // @[register-read.scala:67:30, :86:29] reg [2:0] rrd_uops_0_REG_ldq_idx; // @[register-read.scala:86:29] assign rrd_uops_0_ldq_idx = rrd_uops_0_REG_ldq_idx; // @[register-read.scala:67:30, :86:29] reg [2:0] rrd_uops_0_REG_stq_idx; // @[register-read.scala:86:29] assign rrd_uops_0_stq_idx = rrd_uops_0_REG_stq_idx; // @[register-read.scala:67:30, :86:29] reg [1:0] rrd_uops_0_REG_rxq_idx; // @[register-read.scala:86:29] assign rrd_uops_0_rxq_idx = rrd_uops_0_REG_rxq_idx; // @[register-read.scala:67:30, :86:29] reg [5:0] rrd_uops_0_REG_pdst; // @[register-read.scala:86:29] assign rrd_uops_0_pdst = rrd_uops_0_REG_pdst; // @[register-read.scala:67:30, :86:29] reg [5:0] rrd_uops_0_REG_prs1; // @[register-read.scala:86:29] assign rrd_uops_0_prs1 = rrd_uops_0_REG_prs1; // @[register-read.scala:67:30, :86:29] reg [5:0] rrd_uops_0_REG_prs2; // @[register-read.scala:86:29] assign rrd_uops_0_prs2 = rrd_uops_0_REG_prs2; // @[register-read.scala:67:30, :86:29] reg [5:0] rrd_uops_0_REG_prs3; // @[register-read.scala:86:29] assign rrd_uops_0_prs3 = rrd_uops_0_REG_prs3; // @[register-read.scala:67:30, :86:29] reg [3:0] rrd_uops_0_REG_ppred; // @[register-read.scala:86:29] assign rrd_uops_0_ppred = rrd_uops_0_REG_ppred; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_prs1_busy; // @[register-read.scala:86:29] assign rrd_uops_0_prs1_busy = rrd_uops_0_REG_prs1_busy; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_prs2_busy; // @[register-read.scala:86:29] assign rrd_uops_0_prs2_busy = rrd_uops_0_REG_prs2_busy; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_prs3_busy; // @[register-read.scala:86:29] assign rrd_uops_0_prs3_busy = rrd_uops_0_REG_prs3_busy; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_ppred_busy; // @[register-read.scala:86:29] assign rrd_uops_0_ppred_busy = rrd_uops_0_REG_ppred_busy; // @[register-read.scala:67:30, :86:29] reg [5:0] rrd_uops_0_REG_stale_pdst; // @[register-read.scala:86:29] assign rrd_uops_0_stale_pdst = rrd_uops_0_REG_stale_pdst; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_exception; // @[register-read.scala:86:29] assign rrd_uops_0_exception = rrd_uops_0_REG_exception; // @[register-read.scala:67:30, :86:29] reg [63:0] rrd_uops_0_REG_exc_cause; // @[register-read.scala:86:29] assign rrd_uops_0_exc_cause = rrd_uops_0_REG_exc_cause; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_bypassable; // @[register-read.scala:86:29] assign rrd_uops_0_bypassable = rrd_uops_0_REG_bypassable; // @[register-read.scala:67:30, :86:29] reg [4:0] rrd_uops_0_REG_mem_cmd; // @[register-read.scala:86:29] assign rrd_uops_0_mem_cmd = rrd_uops_0_REG_mem_cmd; // @[register-read.scala:67:30, :86:29] reg [1:0] rrd_uops_0_REG_mem_size; // @[register-read.scala:86:29] assign rrd_uops_0_mem_size = rrd_uops_0_REG_mem_size; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_mem_signed; // @[register-read.scala:86:29] assign rrd_uops_0_mem_signed = rrd_uops_0_REG_mem_signed; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_is_fence; // @[register-read.scala:86:29] assign rrd_uops_0_is_fence = rrd_uops_0_REG_is_fence; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_is_fencei; // @[register-read.scala:86:29] assign rrd_uops_0_is_fencei = rrd_uops_0_REG_is_fencei; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_is_amo; // @[register-read.scala:86:29] assign rrd_uops_0_is_amo = 
rrd_uops_0_REG_is_amo; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_uses_ldq; // @[register-read.scala:86:29] assign rrd_uops_0_uses_ldq = rrd_uops_0_REG_uses_ldq; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_uses_stq; // @[register-read.scala:86:29] assign rrd_uops_0_uses_stq = rrd_uops_0_REG_uses_stq; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_is_sys_pc2epc; // @[register-read.scala:86:29] assign rrd_uops_0_is_sys_pc2epc = rrd_uops_0_REG_is_sys_pc2epc; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_is_unique; // @[register-read.scala:86:29] assign rrd_uops_0_is_unique = rrd_uops_0_REG_is_unique; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_flush_on_commit; // @[register-read.scala:86:29] assign rrd_uops_0_flush_on_commit = rrd_uops_0_REG_flush_on_commit; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_ldst_is_rs1; // @[register-read.scala:86:29] assign rrd_uops_0_ldst_is_rs1 = rrd_uops_0_REG_ldst_is_rs1; // @[register-read.scala:67:30, :86:29] reg [5:0] rrd_uops_0_REG_ldst; // @[register-read.scala:86:29] assign rrd_uops_0_ldst = rrd_uops_0_REG_ldst; // @[register-read.scala:67:30, :86:29] reg [5:0] rrd_uops_0_REG_lrs1; // @[register-read.scala:86:29] assign rrd_uops_0_lrs1 = rrd_uops_0_REG_lrs1; // @[register-read.scala:67:30, :86:29] reg [5:0] rrd_uops_0_REG_lrs2; // @[register-read.scala:86:29] assign rrd_uops_0_lrs2 = rrd_uops_0_REG_lrs2; // @[register-read.scala:67:30, :86:29] reg [5:0] rrd_uops_0_REG_lrs3; // @[register-read.scala:86:29] assign rrd_uops_0_lrs3 = rrd_uops_0_REG_lrs3; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_ldst_val; // @[register-read.scala:86:29] assign rrd_uops_0_ldst_val = rrd_uops_0_REG_ldst_val; // @[register-read.scala:67:30, :86:29] reg [1:0] rrd_uops_0_REG_dst_rtype; // @[register-read.scala:86:29] assign rrd_uops_0_dst_rtype = rrd_uops_0_REG_dst_rtype; // @[register-read.scala:67:30, :86:29] reg [1:0] rrd_uops_0_REG_lrs1_rtype; // @[register-read.scala:86:29] assign rrd_uops_0_lrs1_rtype = rrd_uops_0_REG_lrs1_rtype; // @[register-read.scala:67:30, :86:29] reg [1:0] rrd_uops_0_REG_lrs2_rtype; // @[register-read.scala:86:29] assign rrd_uops_0_lrs2_rtype = rrd_uops_0_REG_lrs2_rtype; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_frs3_en; // @[register-read.scala:86:29] assign rrd_uops_0_frs3_en = rrd_uops_0_REG_frs3_en; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_fp_val; // @[register-read.scala:86:29] assign rrd_uops_0_fp_val = rrd_uops_0_REG_fp_val; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_fp_single; // @[register-read.scala:86:29] assign rrd_uops_0_fp_single = rrd_uops_0_REG_fp_single; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_xcpt_pf_if; // @[register-read.scala:86:29] assign rrd_uops_0_xcpt_pf_if = rrd_uops_0_REG_xcpt_pf_if; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_xcpt_ae_if; // @[register-read.scala:86:29] assign rrd_uops_0_xcpt_ae_if = rrd_uops_0_REG_xcpt_ae_if; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_xcpt_ma_if; // @[register-read.scala:86:29] assign rrd_uops_0_xcpt_ma_if = rrd_uops_0_REG_xcpt_ma_if; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_bp_debug_if; // @[register-read.scala:86:29] assign rrd_uops_0_bp_debug_if = rrd_uops_0_REG_bp_debug_if; // @[register-read.scala:67:30, :86:29] reg rrd_uops_0_REG_bp_xcpt_if; // @[register-read.scala:86:29] assign rrd_uops_0_bp_xcpt_if = rrd_uops_0_REG_bp_xcpt_if; // @[register-read.scala:67:30, 
:86:29] reg [1:0] rrd_uops_0_REG_debug_fsrc; // @[register-read.scala:86:29] assign rrd_uops_0_debug_fsrc = rrd_uops_0_REG_debug_fsrc; // @[register-read.scala:67:30, :86:29] reg [1:0] rrd_uops_0_REG_debug_tsrc; // @[register-read.scala:86:29] assign rrd_uops_0_debug_tsrc = rrd_uops_0_REG_debug_tsrc; // @[register-read.scala:67:30, :86:29] wire [64:0] _rrd_rs1_data_0_T_1; // @[register-read.scala:124:49] wire [64:0] rrd_rs1_data_0; // @[register-read.scala:94:28] wire [64:0] _bypassed_rs1_data_0_T = rrd_rs1_data_0; // @[Mux.scala:126:16] wire [64:0] _rrd_rs2_data_0_T_1; // @[register-read.scala:125:49] wire [64:0] rrd_rs2_data_0; // @[register-read.scala:95:28] wire [64:0] _bypassed_rs2_data_0_T = rrd_rs2_data_0; // @[Mux.scala:126:16] wire [64:0] _rrd_rs3_data_0_T_1; // @[register-read.scala:126:49] wire [64:0] rrd_rs3_data_0; // @[register-read.scala:96:28] wire _rrd_rs1_data_0_T = io_iss_uops_0_prs1_0 == 6'h0; // @[register-read.scala:34:7, :124:67] reg rrd_rs1_data_0_REG; // @[register-read.scala:124:57] assign _rrd_rs1_data_0_T_1 = rrd_rs1_data_0_REG ? 65'h0 : io_rf_read_ports_0_data_0; // @[register-read.scala:34:7, :124:{49,57}] assign rrd_rs1_data_0 = _rrd_rs1_data_0_T_1; // @[register-read.scala:94:28, :124:49] wire _rrd_rs2_data_0_T = io_iss_uops_0_prs2_0 == 6'h0; // @[register-read.scala:34:7, :125:67] reg rrd_rs2_data_0_REG; // @[register-read.scala:125:57] assign _rrd_rs2_data_0_T_1 = rrd_rs2_data_0_REG ? 65'h0 : io_rf_read_ports_1_data_0; // @[register-read.scala:34:7, :125:{49,57}] assign rrd_rs2_data_0 = _rrd_rs2_data_0_T_1; // @[register-read.scala:95:28, :125:49] wire _rrd_rs3_data_0_T = io_iss_uops_0_prs3_0 == 6'h0; // @[register-read.scala:34:7, :126:67] reg rrd_rs3_data_0_REG; // @[register-read.scala:126:57] assign _rrd_rs3_data_0_T_1 = rrd_rs3_data_0_REG ? 65'h0 : io_rf_read_ports_2_data_0; // @[register-read.scala:34:7, :126:{49,57}] assign rrd_rs3_data_0 = _rrd_rs3_data_0_T_1; // @[register-read.scala:96:28, :126:49] wire [7:0] _rrd_kill_T = io_brupdate_b1_mispredict_mask_0 & rrd_uops_0_br_mask; // @[util.scala:118:51] wire _rrd_kill_T_1 = |_rrd_kill_T; // @[util.scala:118:{51,59}] wire rrd_kill = io_kill_0 | _rrd_kill_T_1; // @[util.scala:118:59] wire _exe_reg_valids_0_T = ~rrd_kill & rrd_valids_0; // @[register-read.scala:66:30, :130:28, :132:29] wire [6:0] _exe_reg_uops_0_T_uopc = rrd_kill ? 7'h0 : rrd_uops_0_uopc; // @[register-read.scala:67:30, :130:28, :134:29] wire [31:0] _exe_reg_uops_0_T_inst = rrd_kill ? 32'h0 : rrd_uops_0_inst; // @[register-read.scala:67:30, :130:28, :134:29] wire [31:0] _exe_reg_uops_0_T_debug_inst = rrd_kill ? 32'h0 : rrd_uops_0_debug_inst; // @[register-read.scala:67:30, :130:28, :134:29] wire _exe_reg_uops_0_T_is_rvc = ~rrd_kill & rrd_uops_0_is_rvc; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire [39:0] _exe_reg_uops_0_T_debug_pc = rrd_kill ? 40'h0 : rrd_uops_0_debug_pc; // @[register-read.scala:67:30, :130:28, :134:29] wire [2:0] _exe_reg_uops_0_T_iq_type = rrd_kill ? 3'h0 : rrd_uops_0_iq_type; // @[register-read.scala:67:30, :130:28, :134:29] wire [9:0] _exe_reg_uops_0_T_fu_code = rrd_kill ? 10'h0 : rrd_uops_0_fu_code; // @[register-read.scala:67:30, :130:28, :134:29] wire [3:0] _exe_reg_uops_0_T_ctrl_br_type = rrd_kill ? 4'h0 : rrd_uops_0_ctrl_br_type; // @[register-read.scala:67:30, :130:28, :134:29] wire [1:0] _exe_reg_uops_0_T_ctrl_op1_sel = rrd_kill ? 2'h0 : rrd_uops_0_ctrl_op1_sel; // @[register-read.scala:67:30, :130:28, :134:29] wire [2:0] _exe_reg_uops_0_T_ctrl_op2_sel = rrd_kill ? 
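// Register-file read data is forced to zero above when the source physical register is p0;
// below, rrd_kill replaces each uop field with its default value before the execute-stage
// registers capture it.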
3'h0 : rrd_uops_0_ctrl_op2_sel; // @[register-read.scala:67:30, :130:28, :134:29] wire [2:0] _exe_reg_uops_0_T_ctrl_imm_sel = rrd_kill ? 3'h0 : rrd_uops_0_ctrl_imm_sel; // @[register-read.scala:67:30, :130:28, :134:29] wire [4:0] _exe_reg_uops_0_T_ctrl_op_fcn = rrd_kill ? 5'h0 : rrd_uops_0_ctrl_op_fcn; // @[register-read.scala:67:30, :130:28, :134:29] wire _exe_reg_uops_0_T_ctrl_fcn_dw = ~rrd_kill & rrd_uops_0_ctrl_fcn_dw; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire [2:0] _exe_reg_uops_0_T_ctrl_csr_cmd = rrd_kill ? 3'h0 : rrd_uops_0_ctrl_csr_cmd; // @[register-read.scala:67:30, :130:28, :134:29] wire _exe_reg_uops_0_T_ctrl_is_load = ~rrd_kill & rrd_uops_0_ctrl_is_load; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_ctrl_is_sta = ~rrd_kill & rrd_uops_0_ctrl_is_sta; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_ctrl_is_std = ~rrd_kill & rrd_uops_0_ctrl_is_std; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire [1:0] _exe_reg_uops_0_T_iw_state = rrd_kill ? 2'h0 : rrd_uops_0_iw_state; // @[register-read.scala:67:30, :130:28, :134:29] wire _exe_reg_uops_0_T_iw_p1_poisoned = ~rrd_kill & rrd_uops_0_iw_p1_poisoned; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_iw_p2_poisoned = ~rrd_kill & rrd_uops_0_iw_p2_poisoned; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_is_br = ~rrd_kill & rrd_uops_0_is_br; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_is_jalr = ~rrd_kill & rrd_uops_0_is_jalr; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_is_jal = ~rrd_kill & rrd_uops_0_is_jal; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_is_sfb = ~rrd_kill & rrd_uops_0_is_sfb; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire [7:0] _exe_reg_uops_0_T_br_mask = rrd_kill ? 8'h0 : rrd_uops_0_br_mask; // @[register-read.scala:67:30, :130:28, :134:29] wire [2:0] _exe_reg_uops_0_T_br_tag = rrd_kill ? 3'h0 : rrd_uops_0_br_tag; // @[register-read.scala:67:30, :130:28, :134:29] wire [3:0] _exe_reg_uops_0_T_ftq_idx = rrd_kill ? 4'h0 : rrd_uops_0_ftq_idx; // @[register-read.scala:67:30, :130:28, :134:29] wire _exe_reg_uops_0_T_edge_inst = ~rrd_kill & rrd_uops_0_edge_inst; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire [5:0] _exe_reg_uops_0_T_pc_lob = rrd_kill ? 6'h0 : rrd_uops_0_pc_lob; // @[register-read.scala:67:30, :130:28, :134:29] wire _exe_reg_uops_0_T_taken = ~rrd_kill & rrd_uops_0_taken; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire [19:0] _exe_reg_uops_0_T_imm_packed = rrd_kill ? 20'h0 : rrd_uops_0_imm_packed; // @[register-read.scala:67:30, :130:28, :134:29] wire [11:0] _exe_reg_uops_0_T_csr_addr = rrd_kill ? 12'h0 : rrd_uops_0_csr_addr; // @[register-read.scala:67:30, :130:28, :134:29] wire [4:0] _exe_reg_uops_0_T_rob_idx = rrd_kill ? 5'h0 : rrd_uops_0_rob_idx; // @[register-read.scala:67:30, :130:28, :134:29] wire [2:0] _exe_reg_uops_0_T_ldq_idx = rrd_kill ? 3'h0 : rrd_uops_0_ldq_idx; // @[register-read.scala:67:30, :130:28, :134:29] wire [2:0] _exe_reg_uops_0_T_stq_idx = rrd_kill ? 3'h0 : rrd_uops_0_stq_idx; // @[register-read.scala:67:30, :130:28, :134:29] wire [1:0] _exe_reg_uops_0_T_rxq_idx = rrd_kill ? 2'h0 : rrd_uops_0_rxq_idx; // @[register-read.scala:67:30, :130:28, :134:29] wire [5:0] _exe_reg_uops_0_T_pdst = rrd_kill ? 
6'h0 : rrd_uops_0_pdst; // @[register-read.scala:67:30, :130:28, :134:29] wire [5:0] _exe_reg_uops_0_T_prs1 = rrd_kill ? 6'h0 : rrd_uops_0_prs1; // @[register-read.scala:67:30, :130:28, :134:29] wire [5:0] _exe_reg_uops_0_T_prs2 = rrd_kill ? 6'h0 : rrd_uops_0_prs2; // @[register-read.scala:67:30, :130:28, :134:29] wire [5:0] _exe_reg_uops_0_T_prs3 = rrd_kill ? 6'h0 : rrd_uops_0_prs3; // @[register-read.scala:67:30, :130:28, :134:29] wire [3:0] _exe_reg_uops_0_T_ppred = rrd_kill ? 4'h0 : rrd_uops_0_ppred; // @[register-read.scala:67:30, :130:28, :134:29] wire _exe_reg_uops_0_T_prs1_busy = ~rrd_kill & rrd_uops_0_prs1_busy; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_prs2_busy = ~rrd_kill & rrd_uops_0_prs2_busy; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_prs3_busy = ~rrd_kill & rrd_uops_0_prs3_busy; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_ppred_busy = ~rrd_kill & rrd_uops_0_ppred_busy; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire [5:0] _exe_reg_uops_0_T_stale_pdst = rrd_kill ? 6'h0 : rrd_uops_0_stale_pdst; // @[register-read.scala:67:30, :130:28, :134:29] wire _exe_reg_uops_0_T_exception = ~rrd_kill & rrd_uops_0_exception; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire [63:0] _exe_reg_uops_0_T_exc_cause = rrd_kill ? 64'h0 : rrd_uops_0_exc_cause; // @[register-read.scala:67:30, :130:28, :134:29] wire _exe_reg_uops_0_T_bypassable = ~rrd_kill & rrd_uops_0_bypassable; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire [4:0] _exe_reg_uops_0_T_mem_cmd = rrd_kill ? 5'h0 : rrd_uops_0_mem_cmd; // @[register-read.scala:67:30, :130:28, :134:29] wire [1:0] _exe_reg_uops_0_T_mem_size = rrd_kill ? 2'h0 : rrd_uops_0_mem_size; // @[register-read.scala:67:30, :130:28, :134:29] wire _exe_reg_uops_0_T_mem_signed = ~rrd_kill & rrd_uops_0_mem_signed; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_is_fence = ~rrd_kill & rrd_uops_0_is_fence; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_is_fencei = ~rrd_kill & rrd_uops_0_is_fencei; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_is_amo = ~rrd_kill & rrd_uops_0_is_amo; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_uses_ldq = ~rrd_kill & rrd_uops_0_uses_ldq; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_uses_stq = ~rrd_kill & rrd_uops_0_uses_stq; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_is_sys_pc2epc = ~rrd_kill & rrd_uops_0_is_sys_pc2epc; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_is_unique = ~rrd_kill & rrd_uops_0_is_unique; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_flush_on_commit = ~rrd_kill & rrd_uops_0_flush_on_commit; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_ldst_is_rs1 = ~rrd_kill & rrd_uops_0_ldst_is_rs1; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire [5:0] _exe_reg_uops_0_T_ldst = rrd_kill ? 6'h0 : rrd_uops_0_ldst; // @[register-read.scala:67:30, :130:28, :134:29] wire [5:0] _exe_reg_uops_0_T_lrs1 = rrd_kill ? 6'h0 : rrd_uops_0_lrs1; // @[register-read.scala:67:30, :130:28, :134:29] wire [5:0] _exe_reg_uops_0_T_lrs2 = rrd_kill ? 
6'h0 : rrd_uops_0_lrs2; // @[register-read.scala:67:30, :130:28, :134:29] wire [5:0] _exe_reg_uops_0_T_lrs3 = rrd_kill ? 6'h0 : rrd_uops_0_lrs3; // @[register-read.scala:67:30, :130:28, :134:29] wire _exe_reg_uops_0_T_ldst_val = ~rrd_kill & rrd_uops_0_ldst_val; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire [1:0] _exe_reg_uops_0_T_dst_rtype = rrd_kill ? 2'h2 : rrd_uops_0_dst_rtype; // @[register-read.scala:67:30, :130:28, :134:29] wire [1:0] _exe_reg_uops_0_T_lrs1_rtype = rrd_kill ? 2'h0 : rrd_uops_0_lrs1_rtype; // @[register-read.scala:67:30, :130:28, :134:29] wire [1:0] _exe_reg_uops_0_T_lrs2_rtype = rrd_kill ? 2'h0 : rrd_uops_0_lrs2_rtype; // @[register-read.scala:67:30, :130:28, :134:29] wire _exe_reg_uops_0_T_frs3_en = ~rrd_kill & rrd_uops_0_frs3_en; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_fp_val = ~rrd_kill & rrd_uops_0_fp_val; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_fp_single = ~rrd_kill & rrd_uops_0_fp_single; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_xcpt_pf_if = ~rrd_kill & rrd_uops_0_xcpt_pf_if; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_xcpt_ae_if = ~rrd_kill & rrd_uops_0_xcpt_ae_if; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_xcpt_ma_if = ~rrd_kill & rrd_uops_0_xcpt_ma_if; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_bp_debug_if = ~rrd_kill & rrd_uops_0_bp_debug_if; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire _exe_reg_uops_0_T_bp_xcpt_if = ~rrd_kill & rrd_uops_0_bp_xcpt_if; // @[register-read.scala:67:30, :130:28, :132:29, :134:29] wire [1:0] _exe_reg_uops_0_T_debug_fsrc = rrd_kill ? 2'h0 : rrd_uops_0_debug_fsrc; // @[register-read.scala:67:30, :130:28, :134:29] wire [1:0] _exe_reg_uops_0_T_debug_tsrc = rrd_kill ? 
2'h0 : rrd_uops_0_debug_tsrc; // @[register-read.scala:67:30, :130:28, :134:29] wire [7:0] _exe_reg_uops_0_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:74:37, :85:27] wire [7:0] _exe_reg_uops_0_br_mask_T_1 = rrd_uops_0_br_mask & _exe_reg_uops_0_br_mask_T; // @[util.scala:85:{25,27}] wire [64:0] bypassed_rs1_data_0; // @[register-read.scala:152:31] wire [64:0] bypassed_rs2_data_0; // @[register-read.scala:153:31] assign bypassed_rs1_data_0 = _bypassed_rs1_data_0_T; // @[Mux.scala:126:16] assign bypassed_rs2_data_0 = _bypassed_rs2_data_0_T; // @[Mux.scala:126:16] always @(posedge clock) begin // @[register-read.scala:34:7] if (reset) // @[register-read.scala:34:7] exe_reg_valids_0 <= 1'h0; // @[register-read.scala:69:33] else // @[register-read.scala:34:7] exe_reg_valids_0 <= _exe_reg_valids_0_T; // @[register-read.scala:69:33, :132:29] exe_reg_uops_0_uopc <= _exe_reg_uops_0_T_uopc; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_inst <= _exe_reg_uops_0_T_inst; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_debug_inst <= _exe_reg_uops_0_T_debug_inst; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_is_rvc <= _exe_reg_uops_0_T_is_rvc; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_debug_pc <= _exe_reg_uops_0_T_debug_pc; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_iq_type <= _exe_reg_uops_0_T_iq_type; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_fu_code <= _exe_reg_uops_0_T_fu_code; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ctrl_br_type <= _exe_reg_uops_0_T_ctrl_br_type; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ctrl_op1_sel <= _exe_reg_uops_0_T_ctrl_op1_sel; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ctrl_op2_sel <= _exe_reg_uops_0_T_ctrl_op2_sel; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ctrl_imm_sel <= _exe_reg_uops_0_T_ctrl_imm_sel; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ctrl_op_fcn <= _exe_reg_uops_0_T_ctrl_op_fcn; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ctrl_fcn_dw <= _exe_reg_uops_0_T_ctrl_fcn_dw; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ctrl_csr_cmd <= _exe_reg_uops_0_T_ctrl_csr_cmd; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ctrl_is_load <= _exe_reg_uops_0_T_ctrl_is_load; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ctrl_is_sta <= _exe_reg_uops_0_T_ctrl_is_sta; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ctrl_is_std <= _exe_reg_uops_0_T_ctrl_is_std; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_iw_state <= _exe_reg_uops_0_T_iw_state; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_iw_p1_poisoned <= _exe_reg_uops_0_T_iw_p1_poisoned; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_iw_p2_poisoned <= _exe_reg_uops_0_T_iw_p2_poisoned; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_is_br <= _exe_reg_uops_0_T_is_br; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_is_jalr <= _exe_reg_uops_0_T_is_jalr; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_is_jal <= _exe_reg_uops_0_T_is_jal; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_is_sfb <= _exe_reg_uops_0_T_is_sfb; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_br_mask <= _exe_reg_uops_0_br_mask_T_1; // @[util.scala:85:25] exe_reg_uops_0_br_tag <= _exe_reg_uops_0_T_br_tag; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ftq_idx <= _exe_reg_uops_0_T_ftq_idx; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_edge_inst <= _exe_reg_uops_0_T_edge_inst; // 
@[register-read.scala:70:29, :134:29] exe_reg_uops_0_pc_lob <= _exe_reg_uops_0_T_pc_lob; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_taken <= _exe_reg_uops_0_T_taken; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_imm_packed <= _exe_reg_uops_0_T_imm_packed; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_csr_addr <= _exe_reg_uops_0_T_csr_addr; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_rob_idx <= _exe_reg_uops_0_T_rob_idx; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ldq_idx <= _exe_reg_uops_0_T_ldq_idx; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_stq_idx <= _exe_reg_uops_0_T_stq_idx; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_rxq_idx <= _exe_reg_uops_0_T_rxq_idx; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_pdst <= _exe_reg_uops_0_T_pdst; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_prs1 <= _exe_reg_uops_0_T_prs1; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_prs2 <= _exe_reg_uops_0_T_prs2; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_prs3 <= _exe_reg_uops_0_T_prs3; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ppred <= _exe_reg_uops_0_T_ppred; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_prs1_busy <= _exe_reg_uops_0_T_prs1_busy; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_prs2_busy <= _exe_reg_uops_0_T_prs2_busy; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_prs3_busy <= _exe_reg_uops_0_T_prs3_busy; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ppred_busy <= _exe_reg_uops_0_T_ppred_busy; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_stale_pdst <= _exe_reg_uops_0_T_stale_pdst; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_exception <= _exe_reg_uops_0_T_exception; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_exc_cause <= _exe_reg_uops_0_T_exc_cause; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_bypassable <= _exe_reg_uops_0_T_bypassable; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_mem_cmd <= _exe_reg_uops_0_T_mem_cmd; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_mem_size <= _exe_reg_uops_0_T_mem_size; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_mem_signed <= _exe_reg_uops_0_T_mem_signed; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_is_fence <= _exe_reg_uops_0_T_is_fence; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_is_fencei <= _exe_reg_uops_0_T_is_fencei; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_is_amo <= _exe_reg_uops_0_T_is_amo; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_uses_ldq <= _exe_reg_uops_0_T_uses_ldq; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_uses_stq <= _exe_reg_uops_0_T_uses_stq; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_is_sys_pc2epc <= _exe_reg_uops_0_T_is_sys_pc2epc; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_is_unique <= _exe_reg_uops_0_T_is_unique; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_flush_on_commit <= _exe_reg_uops_0_T_flush_on_commit; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ldst_is_rs1 <= _exe_reg_uops_0_T_ldst_is_rs1; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_ldst <= _exe_reg_uops_0_T_ldst; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_lrs1 <= _exe_reg_uops_0_T_lrs1; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_lrs2 <= _exe_reg_uops_0_T_lrs2; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_lrs3 <= _exe_reg_uops_0_T_lrs3; // @[register-read.scala:70:29, 
:134:29] exe_reg_uops_0_ldst_val <= _exe_reg_uops_0_T_ldst_val; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_dst_rtype <= _exe_reg_uops_0_T_dst_rtype; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_lrs1_rtype <= _exe_reg_uops_0_T_lrs1_rtype; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_lrs2_rtype <= _exe_reg_uops_0_T_lrs2_rtype; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_frs3_en <= _exe_reg_uops_0_T_frs3_en; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_fp_val <= _exe_reg_uops_0_T_fp_val; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_fp_single <= _exe_reg_uops_0_T_fp_single; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_xcpt_pf_if <= _exe_reg_uops_0_T_xcpt_pf_if; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_xcpt_ae_if <= _exe_reg_uops_0_T_xcpt_ae_if; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_xcpt_ma_if <= _exe_reg_uops_0_T_xcpt_ma_if; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_bp_debug_if <= _exe_reg_uops_0_T_bp_debug_if; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_bp_xcpt_if <= _exe_reg_uops_0_T_bp_xcpt_if; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_debug_fsrc <= _exe_reg_uops_0_T_debug_fsrc; // @[register-read.scala:70:29, :134:29] exe_reg_uops_0_debug_tsrc <= _exe_reg_uops_0_T_debug_tsrc; // @[register-read.scala:70:29, :134:29] exe_reg_rs1_data_0 <= bypassed_rs1_data_0; // @[register-read.scala:71:29, :152:31] exe_reg_rs2_data_0 <= bypassed_rs2_data_0; // @[register-read.scala:72:29, :153:31] exe_reg_rs3_data_0 <= rrd_rs3_data_0; // @[register-read.scala:73:29, :96:28] rrd_valids_0_REG <= _rrd_valids_0_T_3; // @[register-read.scala:84:{29,59}] rrd_uops_0_REG_uopc <= rrd_uops_0_newuop_uopc; // @[util.scala:73:26] rrd_uops_0_REG_inst <= rrd_uops_0_newuop_inst; // @[util.scala:73:26] rrd_uops_0_REG_debug_inst <= rrd_uops_0_newuop_debug_inst; // @[util.scala:73:26] rrd_uops_0_REG_is_rvc <= rrd_uops_0_newuop_is_rvc; // @[util.scala:73:26] rrd_uops_0_REG_debug_pc <= rrd_uops_0_newuop_debug_pc; // @[util.scala:73:26] rrd_uops_0_REG_iq_type <= rrd_uops_0_newuop_iq_type; // @[util.scala:73:26] rrd_uops_0_REG_fu_code <= rrd_uops_0_newuop_fu_code; // @[util.scala:73:26] rrd_uops_0_REG_ctrl_br_type <= rrd_uops_0_newuop_ctrl_br_type; // @[util.scala:73:26] rrd_uops_0_REG_ctrl_op1_sel <= rrd_uops_0_newuop_ctrl_op1_sel; // @[util.scala:73:26] rrd_uops_0_REG_ctrl_op2_sel <= rrd_uops_0_newuop_ctrl_op2_sel; // @[util.scala:73:26] rrd_uops_0_REG_ctrl_imm_sel <= rrd_uops_0_newuop_ctrl_imm_sel; // @[util.scala:73:26] rrd_uops_0_REG_ctrl_op_fcn <= rrd_uops_0_newuop_ctrl_op_fcn; // @[util.scala:73:26] rrd_uops_0_REG_ctrl_fcn_dw <= rrd_uops_0_newuop_ctrl_fcn_dw; // @[util.scala:73:26] rrd_uops_0_REG_ctrl_csr_cmd <= rrd_uops_0_newuop_ctrl_csr_cmd; // @[util.scala:73:26] rrd_uops_0_REG_ctrl_is_load <= rrd_uops_0_newuop_ctrl_is_load; // @[util.scala:73:26] rrd_uops_0_REG_ctrl_is_sta <= rrd_uops_0_newuop_ctrl_is_sta; // @[util.scala:73:26] rrd_uops_0_REG_ctrl_is_std <= rrd_uops_0_newuop_ctrl_is_std; // @[util.scala:73:26] rrd_uops_0_REG_iw_state <= rrd_uops_0_newuop_iw_state; // @[util.scala:73:26] rrd_uops_0_REG_iw_p1_poisoned <= 1'h0; // @[register-read.scala:86:29] rrd_uops_0_REG_iw_p2_poisoned <= 1'h0; // @[register-read.scala:86:29] rrd_uops_0_REG_is_br <= rrd_uops_0_newuop_is_br; // @[util.scala:73:26] rrd_uops_0_REG_is_jalr <= rrd_uops_0_newuop_is_jalr; // @[util.scala:73:26] rrd_uops_0_REG_is_jal <= rrd_uops_0_newuop_is_jal; // @[util.scala:73:26] rrd_uops_0_REG_is_sfb 
<= rrd_uops_0_newuop_is_sfb; // @[util.scala:73:26] rrd_uops_0_REG_br_mask <= rrd_uops_0_newuop_br_mask; // @[util.scala:73:26] rrd_uops_0_REG_br_tag <= rrd_uops_0_newuop_br_tag; // @[util.scala:73:26] rrd_uops_0_REG_ftq_idx <= rrd_uops_0_newuop_ftq_idx; // @[util.scala:73:26] rrd_uops_0_REG_edge_inst <= rrd_uops_0_newuop_edge_inst; // @[util.scala:73:26] rrd_uops_0_REG_pc_lob <= rrd_uops_0_newuop_pc_lob; // @[util.scala:73:26] rrd_uops_0_REG_taken <= rrd_uops_0_newuop_taken; // @[util.scala:73:26] rrd_uops_0_REG_imm_packed <= rrd_uops_0_newuop_imm_packed; // @[util.scala:73:26] rrd_uops_0_REG_csr_addr <= rrd_uops_0_newuop_csr_addr; // @[util.scala:73:26] rrd_uops_0_REG_rob_idx <= rrd_uops_0_newuop_rob_idx; // @[util.scala:73:26] rrd_uops_0_REG_ldq_idx <= rrd_uops_0_newuop_ldq_idx; // @[util.scala:73:26] rrd_uops_0_REG_stq_idx <= rrd_uops_0_newuop_stq_idx; // @[util.scala:73:26] rrd_uops_0_REG_rxq_idx <= rrd_uops_0_newuop_rxq_idx; // @[util.scala:73:26] rrd_uops_0_REG_pdst <= rrd_uops_0_newuop_pdst; // @[util.scala:73:26] rrd_uops_0_REG_prs1 <= rrd_uops_0_newuop_prs1; // @[util.scala:73:26] rrd_uops_0_REG_prs2 <= rrd_uops_0_newuop_prs2; // @[util.scala:73:26] rrd_uops_0_REG_prs3 <= rrd_uops_0_newuop_prs3; // @[util.scala:73:26] rrd_uops_0_REG_ppred <= rrd_uops_0_newuop_ppred; // @[util.scala:73:26] rrd_uops_0_REG_prs1_busy <= rrd_uops_0_newuop_prs1_busy; // @[util.scala:73:26] rrd_uops_0_REG_prs2_busy <= rrd_uops_0_newuop_prs2_busy; // @[util.scala:73:26] rrd_uops_0_REG_prs3_busy <= rrd_uops_0_newuop_prs3_busy; // @[util.scala:73:26] rrd_uops_0_REG_ppred_busy <= rrd_uops_0_newuop_ppred_busy; // @[util.scala:73:26] rrd_uops_0_REG_stale_pdst <= rrd_uops_0_newuop_stale_pdst; // @[util.scala:73:26] rrd_uops_0_REG_exception <= rrd_uops_0_newuop_exception; // @[util.scala:73:26] rrd_uops_0_REG_exc_cause <= rrd_uops_0_newuop_exc_cause; // @[util.scala:73:26] rrd_uops_0_REG_bypassable <= rrd_uops_0_newuop_bypassable; // @[util.scala:73:26] rrd_uops_0_REG_mem_cmd <= rrd_uops_0_newuop_mem_cmd; // @[util.scala:73:26] rrd_uops_0_REG_mem_size <= rrd_uops_0_newuop_mem_size; // @[util.scala:73:26] rrd_uops_0_REG_mem_signed <= rrd_uops_0_newuop_mem_signed; // @[util.scala:73:26] rrd_uops_0_REG_is_fence <= rrd_uops_0_newuop_is_fence; // @[util.scala:73:26] rrd_uops_0_REG_is_fencei <= rrd_uops_0_newuop_is_fencei; // @[util.scala:73:26] rrd_uops_0_REG_is_amo <= rrd_uops_0_newuop_is_amo; // @[util.scala:73:26] rrd_uops_0_REG_uses_ldq <= rrd_uops_0_newuop_uses_ldq; // @[util.scala:73:26] rrd_uops_0_REG_uses_stq <= rrd_uops_0_newuop_uses_stq; // @[util.scala:73:26] rrd_uops_0_REG_is_sys_pc2epc <= rrd_uops_0_newuop_is_sys_pc2epc; // @[util.scala:73:26] rrd_uops_0_REG_is_unique <= rrd_uops_0_newuop_is_unique; // @[util.scala:73:26] rrd_uops_0_REG_flush_on_commit <= rrd_uops_0_newuop_flush_on_commit; // @[util.scala:73:26] rrd_uops_0_REG_ldst_is_rs1 <= rrd_uops_0_newuop_ldst_is_rs1; // @[util.scala:73:26] rrd_uops_0_REG_ldst <= rrd_uops_0_newuop_ldst; // @[util.scala:73:26] rrd_uops_0_REG_lrs1 <= rrd_uops_0_newuop_lrs1; // @[util.scala:73:26] rrd_uops_0_REG_lrs2 <= rrd_uops_0_newuop_lrs2; // @[util.scala:73:26] rrd_uops_0_REG_lrs3 <= rrd_uops_0_newuop_lrs3; // @[util.scala:73:26] rrd_uops_0_REG_ldst_val <= rrd_uops_0_newuop_ldst_val; // @[util.scala:73:26] rrd_uops_0_REG_dst_rtype <= rrd_uops_0_newuop_dst_rtype; // @[util.scala:73:26] rrd_uops_0_REG_lrs1_rtype <= rrd_uops_0_newuop_lrs1_rtype; // @[util.scala:73:26] rrd_uops_0_REG_lrs2_rtype <= rrd_uops_0_newuop_lrs2_rtype; // @[util.scala:73:26] 
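// Added commentary (an inference from the util.scala:73:26 annotations, not part of the generated output):
// the rrd_uops_0_REG_* updates in this always block latch the decoded micro-op after its branch mask has been
// refreshed against the incoming branch update (BOOM's GetNewUopAndBrMask helper), forming the register-read
// stage's pipeline register ahead of the kill/bypass muxing and the exe_reg_* registers.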
rrd_uops_0_REG_frs3_en <= rrd_uops_0_newuop_frs3_en; // @[util.scala:73:26] rrd_uops_0_REG_fp_val <= rrd_uops_0_newuop_fp_val; // @[util.scala:73:26] rrd_uops_0_REG_fp_single <= rrd_uops_0_newuop_fp_single; // @[util.scala:73:26] rrd_uops_0_REG_xcpt_pf_if <= rrd_uops_0_newuop_xcpt_pf_if; // @[util.scala:73:26] rrd_uops_0_REG_xcpt_ae_if <= rrd_uops_0_newuop_xcpt_ae_if; // @[util.scala:73:26] rrd_uops_0_REG_xcpt_ma_if <= rrd_uops_0_newuop_xcpt_ma_if; // @[util.scala:73:26] rrd_uops_0_REG_bp_debug_if <= rrd_uops_0_newuop_bp_debug_if; // @[util.scala:73:26] rrd_uops_0_REG_bp_xcpt_if <= rrd_uops_0_newuop_bp_xcpt_if; // @[util.scala:73:26] rrd_uops_0_REG_debug_fsrc <= rrd_uops_0_newuop_debug_fsrc; // @[util.scala:73:26] rrd_uops_0_REG_debug_tsrc <= rrd_uops_0_newuop_debug_tsrc; // @[util.scala:73:26] rrd_rs1_data_0_REG <= _rrd_rs1_data_0_T; // @[register-read.scala:124:{57,67}] rrd_rs2_data_0_REG <= _rrd_rs2_data_0_T; // @[register-read.scala:125:{57,67}] rrd_rs3_data_0_REG <= _rrd_rs3_data_0_T; // @[register-read.scala:126:{57,67}] end // always @(posedge) RegisterReadDecode_3 rrd_decode_unit ( // @[register-read.scala:80:33] .clock (clock), .reset (reset), .io_iss_valid (io_iss_valids_0_0), // @[register-read.scala:34:7] .io_iss_uop_uopc (io_iss_uops_0_uopc_0), // @[register-read.scala:34:7] .io_iss_uop_inst (io_iss_uops_0_inst_0), // @[register-read.scala:34:7] .io_iss_uop_debug_inst (io_iss_uops_0_debug_inst_0), // @[register-read.scala:34:7] .io_iss_uop_is_rvc (io_iss_uops_0_is_rvc_0), // @[register-read.scala:34:7] .io_iss_uop_debug_pc (io_iss_uops_0_debug_pc_0), // @[register-read.scala:34:7] .io_iss_uop_iq_type (io_iss_uops_0_iq_type_0), // @[register-read.scala:34:7] .io_iss_uop_fu_code (io_iss_uops_0_fu_code_0), // @[register-read.scala:34:7] .io_iss_uop_ctrl_br_type (io_iss_uops_0_ctrl_br_type_0), // @[register-read.scala:34:7] .io_iss_uop_ctrl_op1_sel (io_iss_uops_0_ctrl_op1_sel_0), // @[register-read.scala:34:7] .io_iss_uop_ctrl_op2_sel (io_iss_uops_0_ctrl_op2_sel_0), // @[register-read.scala:34:7] .io_iss_uop_ctrl_imm_sel (io_iss_uops_0_ctrl_imm_sel_0), // @[register-read.scala:34:7] .io_iss_uop_ctrl_op_fcn (io_iss_uops_0_ctrl_op_fcn_0), // @[register-read.scala:34:7] .io_iss_uop_ctrl_fcn_dw (io_iss_uops_0_ctrl_fcn_dw_0), // @[register-read.scala:34:7] .io_iss_uop_ctrl_csr_cmd (io_iss_uops_0_ctrl_csr_cmd_0), // @[register-read.scala:34:7] .io_iss_uop_ctrl_is_load (io_iss_uops_0_ctrl_is_load_0), // @[register-read.scala:34:7] .io_iss_uop_ctrl_is_sta (io_iss_uops_0_ctrl_is_sta_0), // @[register-read.scala:34:7] .io_iss_uop_ctrl_is_std (io_iss_uops_0_ctrl_is_std_0), // @[register-read.scala:34:7] .io_iss_uop_iw_state (io_iss_uops_0_iw_state_0), // @[register-read.scala:34:7] .io_iss_uop_is_br (io_iss_uops_0_is_br_0), // @[register-read.scala:34:7] .io_iss_uop_is_jalr (io_iss_uops_0_is_jalr_0), // @[register-read.scala:34:7] .io_iss_uop_is_jal (io_iss_uops_0_is_jal_0), // @[register-read.scala:34:7] .io_iss_uop_is_sfb (io_iss_uops_0_is_sfb_0), // @[register-read.scala:34:7] .io_iss_uop_br_mask (io_iss_uops_0_br_mask_0), // @[register-read.scala:34:7] .io_iss_uop_br_tag (io_iss_uops_0_br_tag_0), // @[register-read.scala:34:7] .io_iss_uop_ftq_idx (io_iss_uops_0_ftq_idx_0), // @[register-read.scala:34:7] .io_iss_uop_edge_inst (io_iss_uops_0_edge_inst_0), // @[register-read.scala:34:7] .io_iss_uop_pc_lob (io_iss_uops_0_pc_lob_0), // @[register-read.scala:34:7] .io_iss_uop_taken (io_iss_uops_0_taken_0), // @[register-read.scala:34:7] .io_iss_uop_imm_packed (io_iss_uops_0_imm_packed_0), //
@[register-read.scala:34:7] .io_iss_uop_csr_addr (io_iss_uops_0_csr_addr_0), // @[register-read.scala:34:7] .io_iss_uop_rob_idx (io_iss_uops_0_rob_idx_0), // @[register-read.scala:34:7] .io_iss_uop_ldq_idx (io_iss_uops_0_ldq_idx_0), // @[register-read.scala:34:7] .io_iss_uop_stq_idx (io_iss_uops_0_stq_idx_0), // @[register-read.scala:34:7] .io_iss_uop_rxq_idx (io_iss_uops_0_rxq_idx_0), // @[register-read.scala:34:7] .io_iss_uop_pdst (io_iss_uops_0_pdst_0), // @[register-read.scala:34:7] .io_iss_uop_prs1 (io_iss_uops_0_prs1_0), // @[register-read.scala:34:7] .io_iss_uop_prs2 (io_iss_uops_0_prs2_0), // @[register-read.scala:34:7] .io_iss_uop_prs3 (io_iss_uops_0_prs3_0), // @[register-read.scala:34:7] .io_iss_uop_ppred (io_iss_uops_0_ppred_0), // @[register-read.scala:34:7] .io_iss_uop_prs1_busy (io_iss_uops_0_prs1_busy_0), // @[register-read.scala:34:7] .io_iss_uop_prs2_busy (io_iss_uops_0_prs2_busy_0), // @[register-read.scala:34:7] .io_iss_uop_prs3_busy (io_iss_uops_0_prs3_busy_0), // @[register-read.scala:34:7] .io_iss_uop_ppred_busy (io_iss_uops_0_ppred_busy_0), // @[register-read.scala:34:7] .io_iss_uop_stale_pdst (io_iss_uops_0_stale_pdst_0), // @[register-read.scala:34:7] .io_iss_uop_exception (io_iss_uops_0_exception_0), // @[register-read.scala:34:7] .io_iss_uop_exc_cause (io_iss_uops_0_exc_cause_0), // @[register-read.scala:34:7] .io_iss_uop_bypassable (io_iss_uops_0_bypassable_0), // @[register-read.scala:34:7] .io_iss_uop_mem_cmd (io_iss_uops_0_mem_cmd_0), // @[register-read.scala:34:7] .io_iss_uop_mem_size (io_iss_uops_0_mem_size_0), // @[register-read.scala:34:7] .io_iss_uop_mem_signed (io_iss_uops_0_mem_signed_0), // @[register-read.scala:34:7] .io_iss_uop_is_fence (io_iss_uops_0_is_fence_0), // @[register-read.scala:34:7] .io_iss_uop_is_fencei (io_iss_uops_0_is_fencei_0), // @[register-read.scala:34:7] .io_iss_uop_is_amo (io_iss_uops_0_is_amo_0), // @[register-read.scala:34:7] .io_iss_uop_uses_ldq (io_iss_uops_0_uses_ldq_0), // @[register-read.scala:34:7] .io_iss_uop_uses_stq (io_iss_uops_0_uses_stq_0), // @[register-read.scala:34:7] .io_iss_uop_is_sys_pc2epc (io_iss_uops_0_is_sys_pc2epc_0), // @[register-read.scala:34:7] .io_iss_uop_is_unique (io_iss_uops_0_is_unique_0), // @[register-read.scala:34:7] .io_iss_uop_flush_on_commit (io_iss_uops_0_flush_on_commit_0), // @[register-read.scala:34:7] .io_iss_uop_ldst_is_rs1 (io_iss_uops_0_ldst_is_rs1_0), // @[register-read.scala:34:7] .io_iss_uop_ldst (io_iss_uops_0_ldst_0), // @[register-read.scala:34:7] .io_iss_uop_lrs1 (io_iss_uops_0_lrs1_0), // @[register-read.scala:34:7] .io_iss_uop_lrs2 (io_iss_uops_0_lrs2_0), // @[register-read.scala:34:7] .io_iss_uop_lrs3 (io_iss_uops_0_lrs3_0), // @[register-read.scala:34:7] .io_iss_uop_ldst_val (io_iss_uops_0_ldst_val_0), // @[register-read.scala:34:7] .io_iss_uop_dst_rtype (io_iss_uops_0_dst_rtype_0), // @[register-read.scala:34:7] .io_iss_uop_lrs1_rtype (io_iss_uops_0_lrs1_rtype_0), // @[register-read.scala:34:7] .io_iss_uop_lrs2_rtype (io_iss_uops_0_lrs2_rtype_0), // @[register-read.scala:34:7] .io_iss_uop_frs3_en (io_iss_uops_0_frs3_en_0), // @[register-read.scala:34:7] .io_iss_uop_fp_val (io_iss_uops_0_fp_val_0), // @[register-read.scala:34:7] .io_iss_uop_fp_single (io_iss_uops_0_fp_single_0), // @[register-read.scala:34:7] .io_iss_uop_xcpt_pf_if (io_iss_uops_0_xcpt_pf_if_0), // @[register-read.scala:34:7] .io_iss_uop_xcpt_ae_if (io_iss_uops_0_xcpt_ae_if_0), // @[register-read.scala:34:7] .io_iss_uop_xcpt_ma_if (io_iss_uops_0_xcpt_ma_if_0), // @[register-read.scala:34:7] 
.io_iss_uop_bp_debug_if (io_iss_uops_0_bp_debug_if_0), // @[register-read.scala:34:7] .io_iss_uop_bp_xcpt_if (io_iss_uops_0_bp_xcpt_if_0), // @[register-read.scala:34:7] .io_iss_uop_debug_fsrc (io_iss_uops_0_debug_fsrc_0), // @[register-read.scala:34:7] .io_iss_uop_debug_tsrc (io_iss_uops_0_debug_tsrc_0), // @[register-read.scala:34:7] .io_rrd_valid (_rrd_decode_unit_io_rrd_valid), .io_rrd_uop_uopc (rrd_uops_0_newuop_uopc), .io_rrd_uop_inst (rrd_uops_0_newuop_inst), .io_rrd_uop_debug_inst (rrd_uops_0_newuop_debug_inst), .io_rrd_uop_is_rvc (rrd_uops_0_newuop_is_rvc), .io_rrd_uop_debug_pc (rrd_uops_0_newuop_debug_pc), .io_rrd_uop_iq_type (rrd_uops_0_newuop_iq_type), .io_rrd_uop_fu_code (rrd_uops_0_newuop_fu_code), .io_rrd_uop_ctrl_br_type (rrd_uops_0_newuop_ctrl_br_type), .io_rrd_uop_ctrl_op1_sel (rrd_uops_0_newuop_ctrl_op1_sel), .io_rrd_uop_ctrl_op2_sel (rrd_uops_0_newuop_ctrl_op2_sel), .io_rrd_uop_ctrl_imm_sel (rrd_uops_0_newuop_ctrl_imm_sel), .io_rrd_uop_ctrl_op_fcn (rrd_uops_0_newuop_ctrl_op_fcn), .io_rrd_uop_ctrl_fcn_dw (rrd_uops_0_newuop_ctrl_fcn_dw), .io_rrd_uop_ctrl_csr_cmd (rrd_uops_0_newuop_ctrl_csr_cmd), .io_rrd_uop_ctrl_is_load (rrd_uops_0_newuop_ctrl_is_load), .io_rrd_uop_ctrl_is_sta (rrd_uops_0_newuop_ctrl_is_sta), .io_rrd_uop_ctrl_is_std (rrd_uops_0_newuop_ctrl_is_std), .io_rrd_uop_iw_state (rrd_uops_0_newuop_iw_state), .io_rrd_uop_is_br (rrd_uops_0_newuop_is_br), .io_rrd_uop_is_jalr (rrd_uops_0_newuop_is_jalr), .io_rrd_uop_is_jal (rrd_uops_0_newuop_is_jal), .io_rrd_uop_is_sfb (rrd_uops_0_newuop_is_sfb), .io_rrd_uop_br_mask (_rrd_decode_unit_io_rrd_uop_br_mask), .io_rrd_uop_br_tag (rrd_uops_0_newuop_br_tag), .io_rrd_uop_ftq_idx (rrd_uops_0_newuop_ftq_idx), .io_rrd_uop_edge_inst (rrd_uops_0_newuop_edge_inst), .io_rrd_uop_pc_lob (rrd_uops_0_newuop_pc_lob), .io_rrd_uop_taken (rrd_uops_0_newuop_taken), .io_rrd_uop_imm_packed (rrd_uops_0_newuop_imm_packed), .io_rrd_uop_csr_addr (rrd_uops_0_newuop_csr_addr), .io_rrd_uop_rob_idx (rrd_uops_0_newuop_rob_idx), .io_rrd_uop_ldq_idx (rrd_uops_0_newuop_ldq_idx), .io_rrd_uop_stq_idx (rrd_uops_0_newuop_stq_idx), .io_rrd_uop_rxq_idx (rrd_uops_0_newuop_rxq_idx), .io_rrd_uop_pdst (rrd_uops_0_newuop_pdst), .io_rrd_uop_prs1 (rrd_uops_0_newuop_prs1), .io_rrd_uop_prs2 (rrd_uops_0_newuop_prs2), .io_rrd_uop_prs3 (rrd_uops_0_newuop_prs3), .io_rrd_uop_ppred (rrd_uops_0_newuop_ppred), .io_rrd_uop_prs1_busy (rrd_uops_0_newuop_prs1_busy), .io_rrd_uop_prs2_busy (rrd_uops_0_newuop_prs2_busy), .io_rrd_uop_prs3_busy (rrd_uops_0_newuop_prs3_busy), .io_rrd_uop_ppred_busy (rrd_uops_0_newuop_ppred_busy), .io_rrd_uop_stale_pdst (rrd_uops_0_newuop_stale_pdst), .io_rrd_uop_exception (rrd_uops_0_newuop_exception), .io_rrd_uop_exc_cause (rrd_uops_0_newuop_exc_cause), .io_rrd_uop_bypassable (rrd_uops_0_newuop_bypassable), .io_rrd_uop_mem_cmd (rrd_uops_0_newuop_mem_cmd), .io_rrd_uop_mem_size (rrd_uops_0_newuop_mem_size), .io_rrd_uop_mem_signed (rrd_uops_0_newuop_mem_signed), .io_rrd_uop_is_fence (rrd_uops_0_newuop_is_fence), .io_rrd_uop_is_fencei (rrd_uops_0_newuop_is_fencei), .io_rrd_uop_is_amo (rrd_uops_0_newuop_is_amo), .io_rrd_uop_uses_ldq (rrd_uops_0_newuop_uses_ldq), .io_rrd_uop_uses_stq (rrd_uops_0_newuop_uses_stq), .io_rrd_uop_is_sys_pc2epc (rrd_uops_0_newuop_is_sys_pc2epc), .io_rrd_uop_is_unique (rrd_uops_0_newuop_is_unique), .io_rrd_uop_flush_on_commit (rrd_uops_0_newuop_flush_on_commit), .io_rrd_uop_ldst_is_rs1 (rrd_uops_0_newuop_ldst_is_rs1), .io_rrd_uop_ldst (rrd_uops_0_newuop_ldst), .io_rrd_uop_lrs1 (rrd_uops_0_newuop_lrs1), .io_rrd_uop_lrs2 
(rrd_uops_0_newuop_lrs2), .io_rrd_uop_lrs3 (rrd_uops_0_newuop_lrs3), .io_rrd_uop_ldst_val (rrd_uops_0_newuop_ldst_val), .io_rrd_uop_dst_rtype (rrd_uops_0_newuop_dst_rtype), .io_rrd_uop_lrs1_rtype (rrd_uops_0_newuop_lrs1_rtype), .io_rrd_uop_lrs2_rtype (rrd_uops_0_newuop_lrs2_rtype), .io_rrd_uop_frs3_en (rrd_uops_0_newuop_frs3_en), .io_rrd_uop_fp_val (rrd_uops_0_newuop_fp_val), .io_rrd_uop_fp_single (rrd_uops_0_newuop_fp_single), .io_rrd_uop_xcpt_pf_if (rrd_uops_0_newuop_xcpt_pf_if), .io_rrd_uop_xcpt_ae_if (rrd_uops_0_newuop_xcpt_ae_if), .io_rrd_uop_xcpt_ma_if (rrd_uops_0_newuop_xcpt_ma_if), .io_rrd_uop_bp_debug_if (rrd_uops_0_newuop_bp_debug_if), .io_rrd_uop_bp_xcpt_if (rrd_uops_0_newuop_bp_xcpt_if), .io_rrd_uop_debug_fsrc (rrd_uops_0_newuop_debug_fsrc), .io_rrd_uop_debug_tsrc (rrd_uops_0_newuop_debug_tsrc) ); // @[register-read.scala:80:33] assign io_rf_read_ports_0_addr = io_rf_read_ports_0_addr_0; // @[register-read.scala:34:7] assign io_rf_read_ports_1_addr = io_rf_read_ports_1_addr_0; // @[register-read.scala:34:7] assign io_rf_read_ports_2_addr = io_rf_read_ports_2_addr_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_valid = io_exe_reqs_0_valid_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_uopc = io_exe_reqs_0_bits_uop_uopc_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_inst = io_exe_reqs_0_bits_uop_inst_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_debug_inst = io_exe_reqs_0_bits_uop_debug_inst_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_is_rvc = io_exe_reqs_0_bits_uop_is_rvc_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_debug_pc = io_exe_reqs_0_bits_uop_debug_pc_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_iq_type = io_exe_reqs_0_bits_uop_iq_type_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_fu_code = io_exe_reqs_0_bits_uop_fu_code_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ctrl_br_type = io_exe_reqs_0_bits_uop_ctrl_br_type_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ctrl_op1_sel = io_exe_reqs_0_bits_uop_ctrl_op1_sel_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ctrl_op2_sel = io_exe_reqs_0_bits_uop_ctrl_op2_sel_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ctrl_imm_sel = io_exe_reqs_0_bits_uop_ctrl_imm_sel_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ctrl_op_fcn = io_exe_reqs_0_bits_uop_ctrl_op_fcn_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ctrl_fcn_dw = io_exe_reqs_0_bits_uop_ctrl_fcn_dw_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ctrl_csr_cmd = io_exe_reqs_0_bits_uop_ctrl_csr_cmd_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ctrl_is_load = io_exe_reqs_0_bits_uop_ctrl_is_load_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ctrl_is_sta = io_exe_reqs_0_bits_uop_ctrl_is_sta_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ctrl_is_std = io_exe_reqs_0_bits_uop_ctrl_is_std_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_iw_state = io_exe_reqs_0_bits_uop_iw_state_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_iw_p1_poisoned = io_exe_reqs_0_bits_uop_iw_p1_poisoned_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_iw_p2_poisoned = io_exe_reqs_0_bits_uop_iw_p2_poisoned_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_is_br = io_exe_reqs_0_bits_uop_is_br_0; // @[register-read.scala:34:7] assign 
io_exe_reqs_0_bits_uop_is_jalr = io_exe_reqs_0_bits_uop_is_jalr_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_is_jal = io_exe_reqs_0_bits_uop_is_jal_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_is_sfb = io_exe_reqs_0_bits_uop_is_sfb_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_br_mask = io_exe_reqs_0_bits_uop_br_mask_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_br_tag = io_exe_reqs_0_bits_uop_br_tag_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ftq_idx = io_exe_reqs_0_bits_uop_ftq_idx_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_edge_inst = io_exe_reqs_0_bits_uop_edge_inst_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_pc_lob = io_exe_reqs_0_bits_uop_pc_lob_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_taken = io_exe_reqs_0_bits_uop_taken_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_imm_packed = io_exe_reqs_0_bits_uop_imm_packed_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_csr_addr = io_exe_reqs_0_bits_uop_csr_addr_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_rob_idx = io_exe_reqs_0_bits_uop_rob_idx_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ldq_idx = io_exe_reqs_0_bits_uop_ldq_idx_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_stq_idx = io_exe_reqs_0_bits_uop_stq_idx_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_rxq_idx = io_exe_reqs_0_bits_uop_rxq_idx_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_pdst = io_exe_reqs_0_bits_uop_pdst_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_prs1 = io_exe_reqs_0_bits_uop_prs1_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_prs2 = io_exe_reqs_0_bits_uop_prs2_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_prs3 = io_exe_reqs_0_bits_uop_prs3_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ppred = io_exe_reqs_0_bits_uop_ppred_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_prs1_busy = io_exe_reqs_0_bits_uop_prs1_busy_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_prs2_busy = io_exe_reqs_0_bits_uop_prs2_busy_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_prs3_busy = io_exe_reqs_0_bits_uop_prs3_busy_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ppred_busy = io_exe_reqs_0_bits_uop_ppred_busy_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_stale_pdst = io_exe_reqs_0_bits_uop_stale_pdst_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_exception = io_exe_reqs_0_bits_uop_exception_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_exc_cause = io_exe_reqs_0_bits_uop_exc_cause_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_bypassable = io_exe_reqs_0_bits_uop_bypassable_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_mem_cmd = io_exe_reqs_0_bits_uop_mem_cmd_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_mem_size = io_exe_reqs_0_bits_uop_mem_size_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_mem_signed = io_exe_reqs_0_bits_uop_mem_signed_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_is_fence = io_exe_reqs_0_bits_uop_is_fence_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_is_fencei = io_exe_reqs_0_bits_uop_is_fencei_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_is_amo = io_exe_reqs_0_bits_uop_is_amo_0; 
// @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_uses_ldq = io_exe_reqs_0_bits_uop_uses_ldq_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_uses_stq = io_exe_reqs_0_bits_uop_uses_stq_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_is_sys_pc2epc = io_exe_reqs_0_bits_uop_is_sys_pc2epc_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_is_unique = io_exe_reqs_0_bits_uop_is_unique_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_flush_on_commit = io_exe_reqs_0_bits_uop_flush_on_commit_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ldst_is_rs1 = io_exe_reqs_0_bits_uop_ldst_is_rs1_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ldst = io_exe_reqs_0_bits_uop_ldst_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_lrs1 = io_exe_reqs_0_bits_uop_lrs1_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_lrs2 = io_exe_reqs_0_bits_uop_lrs2_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_lrs3 = io_exe_reqs_0_bits_uop_lrs3_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_ldst_val = io_exe_reqs_0_bits_uop_ldst_val_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_dst_rtype = io_exe_reqs_0_bits_uop_dst_rtype_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_lrs1_rtype = io_exe_reqs_0_bits_uop_lrs1_rtype_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_lrs2_rtype = io_exe_reqs_0_bits_uop_lrs2_rtype_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_frs3_en = io_exe_reqs_0_bits_uop_frs3_en_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_fp_val = io_exe_reqs_0_bits_uop_fp_val_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_fp_single = io_exe_reqs_0_bits_uop_fp_single_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_xcpt_pf_if = io_exe_reqs_0_bits_uop_xcpt_pf_if_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_xcpt_ae_if = io_exe_reqs_0_bits_uop_xcpt_ae_if_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_xcpt_ma_if = io_exe_reqs_0_bits_uop_xcpt_ma_if_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_bp_debug_if = io_exe_reqs_0_bits_uop_bp_debug_if_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_bp_xcpt_if = io_exe_reqs_0_bits_uop_bp_xcpt_if_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_debug_fsrc = io_exe_reqs_0_bits_uop_debug_fsrc_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_uop_debug_tsrc = io_exe_reqs_0_bits_uop_debug_tsrc_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_rs1_data = io_exe_reqs_0_bits_rs1_data_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_rs2_data = io_exe_reqs_0_bits_rs2_data_0; // @[register-read.scala:34:7] assign io_exe_reqs_0_bits_rs3_data = io_exe_reqs_0_bits_rs3_data_0; // @[register-read.scala:34:7] endmodule
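A minimal Chisel sketch (hypothetical names and simplified I/O, not BOOM's actual code) of the pattern the generated register-read module above implements: the physical-register index is compared against p0 while the read is issued, that comparison is registered to line up with the one-cycle regfile read latency so p0 reads as zero, and the micro-op is squashed on a kill or branch mispredict before being latched into the execute stage.

import chisel3._

class RegReadSketch extends Module {
  val io = IO(new Bundle {
    val prs1     = Input(UInt(6.W))    // physical source register index (assumed width)
    val rfData   = Input(UInt(65.W))   // regfile read data, valid one cycle after the address
    val kill     = Input(Bool())       // pipeline flush or branch mispredict for this uop
    val validIn  = Input(Bool())
    val validOut = Output(Bool())
    val rs1Data  = Output(UInt(65.W))
  })
  // Register the "reads p0" comparison so it lines up with the regfile data.
  val readsZero = RegNext(io.prs1 === 0.U)
  // p0 always reads as zero; otherwise take the regfile data.
  val rs1 = Mux(readsZero, 0.U(65.W), io.rfData)
  // Squash the uop on kill, then latch operand and valid into the execute stage.
  io.validOut := RegNext(io.validIn && !io.kill, false.B)
  io.rs1Data  := RegNext(rs1)
}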
Generate the Verilog code corresponding to the following Chisel files. File Fragmenter.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressSet, BufferParams, IdRange, TransferSizes} import freechips.rocketchip.util.{Repeater, OH1ToUInt, UIntToOH1} import scala.math.min import freechips.rocketchip.util.DataToAugmentedData object EarlyAck { sealed trait T case object AllPuts extends T case object PutFulls extends T case object None extends T } // minSize: minimum size of transfers supported by all outward managers // maxSize: maximum size of transfers supported after the Fragmenter is applied // alwaysMin: fragment all requests down to minSize (else fragment to maximum supported by manager) // earlyAck: should a multibeat Put should be acknowledged on the first beat or last beat // holdFirstDeny: allow the Fragmenter to unsafely combine multibeat Gets by taking the first denied for the whole burst // nameSuffix: appends a suffix to the module name // Fragmenter modifies: PutFull, PutPartial, LogicalData, Get, Hint // Fragmenter passes: ArithmeticData (truncated to minSize if alwaysMin) // Fragmenter cannot modify acquire (could livelock); thus it is unsafe to put caches on both sides class TLFragmenter(val minSize: Int, val maxSize: Int, val alwaysMin: Boolean = false, val earlyAck: EarlyAck.T = EarlyAck.None, val holdFirstDeny: Boolean = false, val nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule { require(isPow2 (maxSize), s"TLFragmenter expects pow2(maxSize), but got $maxSize") require(isPow2 (minSize), s"TLFragmenter expects pow2(minSize), but got $minSize") require(minSize <= maxSize, s"TLFragmenter expects min <= max, but got $minSize > $maxSize") val fragmentBits = log2Ceil(maxSize / minSize) val fullBits = if (earlyAck == EarlyAck.PutFulls) 1 else 0 val toggleBits = 1 val addedBits = fragmentBits + toggleBits + fullBits def expandTransfer(x: TransferSizes, op: String) = if (!x) x else { // validate that we can apply the fragmenter correctly require (x.max >= minSize, s"TLFragmenter (with parent $parent) max transfer size $op(${x.max}) must be >= min transfer size (${minSize})") TransferSizes(x.min, maxSize) } private def noChangeRequired = minSize == maxSize private def shrinkTransfer(x: TransferSizes) = if (!alwaysMin) x else if (x.min <= minSize) TransferSizes(x.min, min(minSize, x.max)) else TransferSizes.none private def mapManager(m: TLSlaveParameters) = m.v1copy( supportsArithmetic = shrinkTransfer(m.supportsArithmetic), supportsLogical = shrinkTransfer(m.supportsLogical), supportsGet = expandTransfer(m.supportsGet, "Get"), supportsPutFull = expandTransfer(m.supportsPutFull, "PutFull"), supportsPutPartial = expandTransfer(m.supportsPutPartial, "PutParital"), supportsHint = expandTransfer(m.supportsHint, "Hint")) val node = new TLAdapterNode( // We require that all the responses are mutually FIFO // Thus we need to compact all of the masters into one big master clientFn = { c => (if (noChangeRequired) c else c.v2copy( masters = Seq(TLMasterParameters.v2( name = "TLFragmenter", sourceId = IdRange(0, if (minSize == maxSize) c.endSourceId else (c.endSourceId << addedBits)), requestFifo = true, emits = TLMasterToSlaveTransferSizes( acquireT = shrinkTransfer(c.masters.map(_.emits.acquireT) .reduce(_ mincover _)), 
acquireB = shrinkTransfer(c.masters.map(_.emits.acquireB) .reduce(_ mincover _)), arithmetic = shrinkTransfer(c.masters.map(_.emits.arithmetic).reduce(_ mincover _)), logical = shrinkTransfer(c.masters.map(_.emits.logical) .reduce(_ mincover _)), get = shrinkTransfer(c.masters.map(_.emits.get) .reduce(_ mincover _)), putFull = shrinkTransfer(c.masters.map(_.emits.putFull) .reduce(_ mincover _)), putPartial = shrinkTransfer(c.masters.map(_.emits.putPartial).reduce(_ mincover _)), hint = shrinkTransfer(c.masters.map(_.emits.hint) .reduce(_ mincover _)) ) )) ))}, managerFn = { m => if (noChangeRequired) m else m.v2copy(slaves = m.slaves.map(mapManager)) } ) { override def circuitIdentity = noChangeRequired } lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = (Seq("TLFragmenter") ++ nameSuffix).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => if (noChangeRequired) { out <> in } else { // All managers must share a common FIFO domain (responses might end up interleaved) val manager = edgeOut.manager val managers = manager.managers val beatBytes = manager.beatBytes val fifoId = managers(0).fifoId require (fifoId.isDefined && managers.map(_.fifoId == fifoId).reduce(_ && _)) require (!manager.anySupportAcquireB || !edgeOut.client.anySupportProbe, s"TLFragmenter (with parent $parent) can't fragment a caching client's requests into a cacheable region") require (minSize >= beatBytes, s"TLFragmenter (with parent $parent) can't support fragmenting ($minSize) to sub-beat ($beatBytes) accesses") // We can't support devices which are cached on both sides of us require (!edgeOut.manager.anySupportAcquireB || !edgeIn.client.anySupportProbe) // We can't support denied because we reassemble fragments require (!edgeOut.manager.mayDenyGet || holdFirstDeny, s"TLFragmenter (with parent $parent) can't support denials without holdFirstDeny=true") require (!edgeOut.manager.mayDenyPut || earlyAck == EarlyAck.None) /* The Fragmenter is a bit tricky, because there are 5 sizes in play: * max size -- the maximum transfer size possible * orig size -- the original pre-fragmenter size * frag size -- the modified post-fragmenter size * min size -- the threshold below which frag=orig * beat size -- the amount transfered on any given beat * * The relationships are as follows: * max >= orig >= frag * max > min >= beat * It IS possible that orig <= min (then frag=orig; ie: no fragmentation) * * The fragment# (sent via TL.source) is measured in multiples of min size. * Meanwhile, to track the progress, counters measure in multiples of beat size. * * Here is an example of a bus with max=256, min=8, beat=4 and a device supporting 16. 
* * in.A out.A (frag#) out.D (frag#) in.D gen# ack# * get64 get16 6 ackD16 6 ackD64 12 15 * ackD16 6 ackD64 14 * ackD16 6 ackD64 13 * ackD16 6 ackD64 12 * get16 4 ackD16 4 ackD64 8 11 * ackD16 4 ackD64 10 * ackD16 4 ackD64 9 * ackD16 4 ackD64 8 * get16 2 ackD16 2 ackD64 4 7 * ackD16 2 ackD64 6 * ackD16 2 ackD64 5 * ackD16 2 ackD64 4 * get16 0 ackD16 0 ackD64 0 3 * ackD16 0 ackD64 2 * ackD16 0 ackD64 1 * ackD16 0 ackD64 0 * * get8 get8 0 ackD8 0 ackD8 0 1 * ackD8 0 ackD8 0 * * get4 get4 0 ackD4 0 ackD4 0 0 * get1 get1 0 ackD1 0 ackD1 0 0 * * put64 put16 6 15 * put64 put16 6 14 * put64 put16 6 13 * put64 put16 6 ack16 6 12 12 * put64 put16 4 11 * put64 put16 4 10 * put64 put16 4 9 * put64 put16 4 ack16 4 8 8 * put64 put16 2 7 * put64 put16 2 6 * put64 put16 2 5 * put64 put16 2 ack16 2 4 4 * put64 put16 0 3 * put64 put16 0 2 * put64 put16 0 1 * put64 put16 0 ack16 0 ack64 0 0 * * put8 put8 0 1 * put8 put8 0 ack8 0 ack8 0 0 * * put4 put4 0 ack4 0 ack4 0 0 * put1 put1 0 ack1 0 ack1 0 0 */ val counterBits = log2Up(maxSize/beatBytes) val maxDownSize = if (alwaysMin) minSize else min(manager.maxTransfer, maxSize) // Consider the following waveform for two 4-beat bursts: // ---A----A------------ // -------D-----DDD-DDDD // Under TL rules, the second A can use the same source as the first A, // because the source is released for reuse on the first response beat. // // However, if we fragment the requests, it looks like this: // ---3210-3210--------- // -------3-----210-3210 // ... now we've broken the rules because 210 are twice inflight. // // This phenomenon means we can have essentially 2*maxSize/minSize-1 // fragmented transactions in flight per original transaction source. // // To keep the source unique, we encode the beat counter in the low // bits of the source. To solve the overlap, we use a toggle bit. // Whatever toggle bit the D is reassembling, A will use the opposite. 
// First, handle the return path val acknum = RegInit(0.U(counterBits.W)) val dOrig = Reg(UInt()) val dToggle = RegInit(false.B) val dFragnum = out.d.bits.source(fragmentBits-1, 0) val dFirst = acknum === 0.U val dLast = dFragnum === 0.U // only for AccessAck (!Data) val dsizeOH = UIntToOH (out.d.bits.size, log2Ceil(maxDownSize)+1) val dsizeOH1 = UIntToOH1(out.d.bits.size, log2Up(maxDownSize)) val dHasData = edgeOut.hasData(out.d.bits) // calculate new acknum val acknum_fragment = dFragnum << log2Ceil(minSize/beatBytes) val acknum_size = dsizeOH1 >> log2Ceil(beatBytes) assert (!out.d.valid || (acknum_fragment & acknum_size) === 0.U) val dFirst_acknum = acknum_fragment | Mux(dHasData, acknum_size, 0.U) val ack_decrement = Mux(dHasData, 1.U, dsizeOH >> log2Ceil(beatBytes)) // calculate the original size val dFirst_size = OH1ToUInt((dFragnum << log2Ceil(minSize)) | dsizeOH1) when (out.d.fire) { acknum := Mux(dFirst, dFirst_acknum, acknum - ack_decrement) when (dFirst) { dOrig := dFirst_size dToggle := out.d.bits.source(fragmentBits) } } // Swallow up non-data ack fragments val doEarlyAck = earlyAck match { case EarlyAck.AllPuts => true.B case EarlyAck.PutFulls => out.d.bits.source(fragmentBits+1) case EarlyAck.None => false.B } val drop = !dHasData && !Mux(doEarlyAck, dFirst, dLast) out.d.ready := in.d.ready || drop in.d.valid := out.d.valid && !drop in.d.bits := out.d.bits // pass most stuff unchanged in.d.bits.source := out.d.bits.source >> addedBits in.d.bits.size := Mux(dFirst, dFirst_size, dOrig) if (edgeOut.manager.mayDenyPut) { val r_denied = Reg(Bool()) val d_denied = (!dFirst && r_denied) || out.d.bits.denied when (out.d.fire) { r_denied := d_denied } in.d.bits.denied := d_denied } if (edgeOut.manager.mayDenyGet) { // Take denied only from the first beat and hold that value val d_denied = out.d.bits.denied holdUnless dFirst when (dHasData) { in.d.bits.denied := d_denied in.d.bits.corrupt := d_denied || out.d.bits.corrupt } } // What maximum transfer sizes do downstream devices support? 
val maxArithmetics = managers.map(_.supportsArithmetic.max) val maxLogicals = managers.map(_.supportsLogical.max) val maxGets = managers.map(_.supportsGet.max) val maxPutFulls = managers.map(_.supportsPutFull.max) val maxPutPartials = managers.map(_.supportsPutPartial.max) val maxHints = managers.map(m => if (m.supportsHint) maxDownSize else 0) // We assume that the request is valid => size 0 is impossible val lgMinSize = log2Ceil(minSize).U val maxLgArithmetics = maxArithmetics.map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgLogicals = maxLogicals .map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgGets = maxGets .map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgPutFulls = maxPutFulls .map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgPutPartials = maxPutPartials.map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgHints = maxHints .map(m => if (m == 0) lgMinSize else log2Ceil(m).U) // Make the request repeatable val repeater = Module(new Repeater(in.a.bits)) repeater.io.enq <> in.a val in_a = repeater.io.deq // If this is infront of a single manager, these become constants val find = manager.findFast(edgeIn.address(in_a.bits)) val maxLgArithmetic = Mux1H(find, maxLgArithmetics) val maxLgLogical = Mux1H(find, maxLgLogicals) val maxLgGet = Mux1H(find, maxLgGets) val maxLgPutFull = Mux1H(find, maxLgPutFulls) val maxLgPutPartial = Mux1H(find, maxLgPutPartials) val maxLgHint = Mux1H(find, maxLgHints) val limit = if (alwaysMin) lgMinSize else MuxLookup(in_a.bits.opcode, lgMinSize)(Array( TLMessages.PutFullData -> maxLgPutFull, TLMessages.PutPartialData -> maxLgPutPartial, TLMessages.ArithmeticData -> maxLgArithmetic, TLMessages.LogicalData -> maxLgLogical, TLMessages.Get -> maxLgGet, TLMessages.Hint -> maxLgHint)) val aOrig = in_a.bits.size val aFrag = Mux(aOrig > limit, limit, aOrig) val aOrigOH1 = UIntToOH1(aOrig, log2Ceil(maxSize)) val aFragOH1 = UIntToOH1(aFrag, log2Up(maxDownSize)) val aHasData = edgeIn.hasData(in_a.bits) val aMask = Mux(aHasData, 0.U, aFragOH1) val gennum = RegInit(0.U(counterBits.W)) val aFirst = gennum === 0.U val old_gennum1 = Mux(aFirst, aOrigOH1 >> log2Ceil(beatBytes), gennum - 1.U) val new_gennum = ~(~old_gennum1 | (aMask >> log2Ceil(beatBytes))) // ~(~x|y) is width safe val aFragnum = ~(~(old_gennum1 >> log2Ceil(minSize/beatBytes)) | (aFragOH1 >> log2Ceil(minSize))) val aLast = aFragnum === 0.U val aToggle = !Mux(aFirst, dToggle, RegEnable(dToggle, aFirst)) val aFull = if (earlyAck == EarlyAck.PutFulls) Some(in_a.bits.opcode === TLMessages.PutFullData) else None when (out.a.fire) { gennum := new_gennum } repeater.io.repeat := !aHasData && aFragnum =/= 0.U out.a <> in_a out.a.bits.address := in_a.bits.address | ~(old_gennum1 << log2Ceil(beatBytes) | ~aOrigOH1 | aFragOH1 | (minSize-1).U) out.a.bits.source := Cat(Seq(in_a.bits.source) ++ aFull ++ Seq(aToggle.asUInt, aFragnum)) out.a.bits.size := aFrag // Optimize away some of the Repeater's registers assert (!repeater.io.full || !aHasData) out.a.bits.data := in.a.bits.data val fullMask = ((BigInt(1) << beatBytes) - 1).U assert (!repeater.io.full || in_a.bits.mask === fullMask) out.a.bits.mask := Mux(repeater.io.full, fullMask, in.a.bits.mask) out.a.bits.user.waiveAll :<= in.a.bits.user.subset(_.isData) // Tie off unused channels in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLFragmenter { def apply(minSize: Int, maxSize: Int, alwaysMin: Boolean = false, earlyAck: 
EarlyAck.T = EarlyAck.None, holdFirstDeny: Boolean = false, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode = { if (minSize <= maxSize) { val fragmenter = LazyModule(new TLFragmenter(minSize, maxSize, alwaysMin, earlyAck, holdFirstDeny, nameSuffix)) fragmenter.node } else { TLEphemeralNode()(ValName("no_fragmenter")) } } def apply(wrapper: TLBusWrapper, nameSuffix: Option[String])(implicit p: Parameters): TLNode = apply(wrapper.beatBytes, wrapper.blockBytes, nameSuffix = nameSuffix) def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper, None) } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMFragmenter(ramBeatBytes: Int, maxSize: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val fuzz = LazyModule(new TLFuzzer(txns)) val model = LazyModule(new TLRAMModel("Fragmenter")) val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff), beatBytes = ramBeatBytes)) (ram.node := TLDelayer(0.1) := TLBuffer(BufferParams.flow) := TLDelayer(0.1) := TLFragmenter(ramBeatBytes, maxSize, earlyAck = EarlyAck.AllPuts) := TLDelayer(0.1) := TLBuffer(BufferParams.flow) := TLFragmenter(ramBeatBytes, maxSize/2) := TLDelayer(0.1) := TLBuffer(BufferParams.flow) := model.node := fuzz.node) lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished } } class TLRAMFragmenterTest(ramBeatBytes: Int, maxSize: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLRAMFragmenter(ramBeatBytes,maxSize,txns)).module) io.finished := dut.io.finished dut.io.start := io.start } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) 
val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else 
Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: 
Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Nodes.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class 
TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def 
render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) 
with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } }
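As a side note on the LazyRawModuleImp shown above: its childClock/childReset wires default to a disabled clock and a DontCare reset, and are meant to be driven explicitly so that lazily instantiated children (including anonymous modules such as monitors) land in a real clock domain. A minimal sketch of that pattern follows; the ClockDomainExample name is illustrative and the import paths are assumed from the package headers of the files above.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyRawModuleImp}

class ClockDomainExample(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    val io = IO(new Bundle {
      val clock = Input(Clock())
      val reset = Input(Bool())
    })
    // Drive the default clock/reset seen by all lazily instantiated children.
    childClock := io.clock
    childReset := io.reset
  }
}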
module TLFragmenter_LLCCtrl( // @[Fragmenter.scala:92:9] input clock, // @[Fragmenter.scala:92:9] input reset, // @[Fragmenter.scala:92:9] output auto_anon_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [25:0] auto_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_anon_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [1:0] auto_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [11:0] auto_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [25:0] auto_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_anon_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [1:0] auto_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [11:0] auto_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_anon_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_anon_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25] ); wire _repeater_io_full; // @[Fragmenter.scala:274:30] wire _repeater_io_enq_ready; // @[Fragmenter.scala:274:30] wire _repeater_io_deq_valid; // @[Fragmenter.scala:274:30] wire [2:0] _repeater_io_deq_bits_opcode; // @[Fragmenter.scala:274:30] wire [2:0] _repeater_io_deq_bits_size; // @[Fragmenter.scala:274:30] wire [7:0] _repeater_io_deq_bits_source; // @[Fragmenter.scala:274:30] wire [25:0] _repeater_io_deq_bits_address; // @[Fragmenter.scala:274:30] wire [7:0] _repeater_io_deq_bits_mask; // @[Fragmenter.scala:274:30] reg [2:0] acknum; // @[Fragmenter.scala:201:29] reg [2:0] dOrig; // 
@[Fragmenter.scala:202:24] reg dToggle; // @[Fragmenter.scala:203:30] wire dFirst = acknum == 3'h0; // @[Fragmenter.scala:201:29, :205:29] wire [5:0] _dsizeOH1_T = 6'h7 << auto_anon_out_d_bits_size; // @[package.scala:243:71] wire [2:0] _GEN = ~(auto_anon_out_d_bits_source[2:0]); // @[package.scala:241:49] wire [2:0] dFirst_size_hi = auto_anon_out_d_bits_source[2:0] & {1'h1, _GEN[2:1]}; // @[OneHot.scala:30:18] wire [2:0] _dFirst_size_T_8 = {1'h0, dFirst_size_hi[2:1]} | ~(_dsizeOH1_T[2:0]) & {_GEN[0], _dsizeOH1_T[2:1]}; // @[OneHot.scala:30:18, :31:18, :32:28] wire [2:0] dFirst_size = {|dFirst_size_hi, |(_dFirst_size_T_8[2:1]), _dFirst_size_T_8[2] | _dFirst_size_T_8[0]}; // @[OneHot.scala:30:18, :31:18, :32:{10,14,28}] wire drop = ~(auto_anon_out_d_bits_opcode[0]) & (|(auto_anon_out_d_bits_source[2:0])); // @[Fragmenter.scala:204:41, :206:30, :234:{20,30}] wire anonOut_d_ready = auto_anon_in_d_ready | drop; // @[Fragmenter.scala:234:30, :235:35] wire anonIn_d_valid = auto_anon_out_d_valid & ~drop; // @[Fragmenter.scala:234:30, :236:{36,39}] wire [2:0] anonIn_d_bits_size = dFirst ? dFirst_size : dOrig; // @[OneHot.scala:32:10] wire [12:0] _aOrigOH1_T = 13'h3F << _repeater_io_deq_bits_size; // @[package.scala:243:71] reg [2:0] gennum; // @[Fragmenter.scala:303:29] wire aFirst = gennum == 3'h0; // @[Fragmenter.scala:303:29, :304:29] wire [2:0] aFragnum = aFirst ? ~(_aOrigOH1_T[5:3]) : gennum - 3'h1; // @[package.scala:243:{46,71,76}] reg aToggle_r; // @[Fragmenter.scala:309:54]
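For reference, the _dsizeOH1_T and _aOrigOH1_T shift-and-invert terms in the Verilog above (annotated @[package.scala:243:71]) are the flattened form of the UIntToOH1 helper defined in package.scala, which turns a logarithmic size field into a byte mask of (1 << size) - 1. Below is a small standalone sketch of that helper in use; the SizeMaskExample module and its port widths are illustrative, not taken from the fragmenter itself.

import chisel3._
import freechips.rocketchip.util._

class SizeMaskExample extends Module {
  val io = IO(new Bundle {
    val size = Input(UInt(3.W))   // log2 of the transfer size in bytes
    val oh1  = Output(UInt(6.W))  // (1 << size) - 1
  })
  // UIntToOH1(x, width) = ~((-1).S(width.W).asUInt << x)(width-1, 0),
  // which lowers to the shift-then-invert pattern visible in the Verilog above.
  io.oh1 := UIntToOH1(io.size, 6)
}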
Generate the Verilog code corresponding to the following Chisel files. File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def 
validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File package.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { 
log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def 
toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if 
(isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File mshrs.scala: //****************************************************************************** // Ported from Rocket-Chip // See LICENSE.Berkeley and LICENSE.SiFive in Rocket-Chip for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.lsu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.tile._ import freechips.rocketchip.util._ import freechips.rocketchip.rocket._ import boom.v3.common._ import boom.v3.exu.BrUpdateInfo import boom.v3.util.{IsKilledByBranch, GetNewBrMask, BranchKillableQueue, IsOlder, UpdateBrMask, AgePriorityEncoder, WrapInc} class BoomDCacheReqInternal(implicit p: Parameters) extends BoomDCacheReq()(p) with HasL1HellaCacheParameters { // miss info val tag_match = Bool() val old_meta = new L1Metadata val way_en = UInt(nWays.W) // Used in the MSHRs val sdq_id = UInt(log2Ceil(cfg.nSDQ).W) } class BoomMSHR(implicit edge: TLEdgeOut, p: Parameters) extends BoomModule()(p) with HasL1HellaCacheParameters { val io = IO(new Bundle { val id = Input(UInt()) val req_pri_val = Input(Bool()) val req_pri_rdy = Output(Bool()) val req_sec_val = Input(Bool()) val req_sec_rdy = Output(Bool()) val clear_prefetch = Input(Bool()) val brupdate = Input(new BrUpdateInfo) val exception = Input(Bool()) val rob_pnr_idx = Input(UInt(robAddrSz.W)) val rob_head_idx = Input(UInt(robAddrSz.W)) val req = Input(new BoomDCacheReqInternal) val req_is_probe = Input(Bool()) val idx = Output(Valid(UInt())) val way = Output(Valid(UInt())) val tag = Output(Valid(UInt())) val mem_acquire = Decoupled(new TLBundleA(edge.bundle)) val mem_grant = Flipped(Decoupled(new TLBundleD(edge.bundle))) val mem_finish = Decoupled(new TLBundleE(edge.bundle)) val prober_state = Input(Valid(UInt(coreMaxAddrBits.W))) val refill = Decoupled(new L1DataWriteReq) val meta_write = Decoupled(new L1MetaWriteReq) val meta_read = Decoupled(new L1MetaReadReq) val meta_resp = Input(Valid(new L1Metadata)) val wb_req = Decoupled(new WritebackReq(edge.bundle)) // To inform the prefetcher when we are commiting the fetch of this line val commit_val = Output(Bool()) val commit_addr = Output(UInt(coreMaxAddrBits.W)) val commit_coh = Output(new ClientMetadata) // Reading from the line buffer val lb_read = Decoupled(new LineBufferReadReq) val lb_resp = Input(UInt(encRowBits.W)) val lb_write = Decoupled(new LineBufferWriteReq) // Replays go through the cache pipeline again val replay = Decoupled(new BoomDCacheReqInternal) // Resp go straight out to the core val resp = Decoupled(new BoomDCacheResp) // Writeback unit tells us when it is done processing our wb val wb_resp = Input(Bool()) val probe_rdy = Output(Bool()) }) // TODO: Optimize this. 
We don't want to mess with cache during speculation // s_refill_req : Make a request for a new cache line // s_refill_resp : Store the refill response into our buffer // s_drain_rpq_loads : Drain out loads from the rpq // : If miss was misspeculated, go to s_invalid // s_wb_req : Write back the evicted cache line // s_wb_resp : Finish writing back the evicted cache line // s_meta_write_req : Write the metadata for new cache lne // s_meta_write_resp : val s_invalid :: s_refill_req :: s_refill_resp :: s_drain_rpq_loads :: s_meta_read :: s_meta_resp_1 :: s_meta_resp_2 :: s_meta_clear :: s_wb_meta_read :: s_wb_req :: s_wb_resp :: s_commit_line :: s_drain_rpq :: s_meta_write_req :: s_mem_finish_1 :: s_mem_finish_2 :: s_prefetched :: s_prefetch :: Nil = Enum(18) val state = RegInit(s_invalid) val req = Reg(new BoomDCacheReqInternal) val req_idx = req.addr(untagBits-1, blockOffBits) val req_tag = req.addr >> untagBits val req_block_addr = (req.addr >> blockOffBits) << blockOffBits val req_needs_wb = RegInit(false.B) val new_coh = RegInit(ClientMetadata.onReset) val (_, shrink_param, coh_on_clear) = req.old_meta.coh.onCacheControl(M_FLUSH) val grow_param = new_coh.onAccess(req.uop.mem_cmd)._2 val coh_on_grant = new_coh.onGrant(req.uop.mem_cmd, io.mem_grant.bits.param) // We only accept secondary misses if the original request had sufficient permissions val (cmd_requires_second_acquire, is_hit_again, _, dirtier_coh, dirtier_cmd) = new_coh.onSecondaryAccess(req.uop.mem_cmd, io.req.uop.mem_cmd) val (_, _, refill_done, refill_address_inc) = edge.addr_inc(io.mem_grant) val sec_rdy = (!cmd_requires_second_acquire && !io.req_is_probe && !state.isOneOf(s_invalid, s_meta_write_req, s_mem_finish_1, s_mem_finish_2))// Always accept secondary misses val rpq = Module(new BranchKillableQueue(new BoomDCacheReqInternal, cfg.nRPQ, u => u.uses_ldq, false)) rpq.io.brupdate := io.brupdate rpq.io.flush := io.exception assert(!(state === s_invalid && !rpq.io.empty)) rpq.io.enq.valid := ((io.req_pri_val && io.req_pri_rdy) || (io.req_sec_val && io.req_sec_rdy)) && !isPrefetch(io.req.uop.mem_cmd) rpq.io.enq.bits := io.req rpq.io.deq.ready := false.B val grantack = Reg(Valid(new TLBundleE(edge.bundle))) val refill_ctr = Reg(UInt(log2Ceil(cacheDataBeats).W)) val commit_line = Reg(Bool()) val grant_had_data = Reg(Bool()) val finish_to_prefetch = Reg(Bool()) // Block probes if a tag write we started is still in the pipeline val meta_hazard = RegInit(0.U(2.W)) when (meta_hazard =/= 0.U) { meta_hazard := meta_hazard + 1.U } when (io.meta_write.fire) { meta_hazard := 1.U } io.probe_rdy := (meta_hazard === 0.U && (state.isOneOf(s_invalid, s_refill_req, s_refill_resp, s_drain_rpq_loads) || (state === s_meta_read && grantack.valid))) io.idx.valid := state =/= s_invalid io.tag.valid := state =/= s_invalid io.way.valid := !state.isOneOf(s_invalid, s_prefetch) io.idx.bits := req_idx io.tag.bits := req_tag io.way.bits := req.way_en io.meta_write.valid := false.B io.meta_write.bits := DontCare io.req_pri_rdy := false.B io.req_sec_rdy := sec_rdy && rpq.io.enq.ready io.mem_acquire.valid := false.B io.mem_acquire.bits := DontCare io.refill.valid := false.B io.refill.bits := DontCare io.replay.valid := false.B io.replay.bits := DontCare io.wb_req.valid := false.B io.wb_req.bits := DontCare io.resp.valid := false.B io.resp.bits := DontCare io.commit_val := false.B io.commit_addr := req.addr io.commit_coh := coh_on_grant io.meta_read.valid := false.B io.meta_read.bits := DontCare io.mem_finish.valid := false.B io.mem_finish.bits := DontCare 
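// The remaining handshake outputs are likewise defaulted to inactive here;
// the state-machine branches below re-drive valid/ready/bits only for the
// interfaces used in the current state.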
io.lb_write.valid := false.B io.lb_write.bits := DontCare io.lb_read.valid := false.B io.lb_read.bits := DontCare io.mem_grant.ready := false.B when (io.req_sec_val && io.req_sec_rdy) { req.uop.mem_cmd := dirtier_cmd when (is_hit_again) { new_coh := dirtier_coh } } def handle_pri_req(old_state: UInt): UInt = { val new_state = WireInit(old_state) grantack.valid := false.B refill_ctr := 0.U assert(rpq.io.enq.ready) req := io.req val old_coh = io.req.old_meta.coh req_needs_wb := old_coh.onCacheControl(M_FLUSH)._1 // does the line we are evicting need to be written back when (io.req.tag_match) { val (is_hit, _, coh_on_hit) = old_coh.onAccess(io.req.uop.mem_cmd) when (is_hit) { // set dirty bit assert(isWrite(io.req.uop.mem_cmd)) new_coh := coh_on_hit new_state := s_drain_rpq } .otherwise { // upgrade permissions new_coh := old_coh new_state := s_refill_req } } .otherwise { // refill and writeback if necessary new_coh := ClientMetadata.onReset new_state := s_refill_req } new_state } when (state === s_invalid) { io.req_pri_rdy := true.B grant_had_data := false.B when (io.req_pri_val && io.req_pri_rdy) { state := handle_pri_req(state) } } .elsewhen (state === s_refill_req) { io.mem_acquire.valid := true.B // TODO: Use AcquirePerm if just doing permissions acquire io.mem_acquire.bits := edge.AcquireBlock( fromSource = io.id, toAddress = Cat(req_tag, req_idx) << blockOffBits, lgSize = lgCacheBlockBytes.U, growPermissions = grow_param)._2 when (io.mem_acquire.fire) { state := s_refill_resp } } .elsewhen (state === s_refill_resp) { when (edge.hasData(io.mem_grant.bits)) { io.mem_grant.ready := io.lb_write.ready io.lb_write.valid := io.mem_grant.valid io.lb_write.bits.id := io.id io.lb_write.bits.offset := refill_address_inc >> rowOffBits io.lb_write.bits.data := io.mem_grant.bits.data } .otherwise { io.mem_grant.ready := true.B } when (io.mem_grant.fire) { grant_had_data := edge.hasData(io.mem_grant.bits) } when (refill_done) { grantack.valid := edge.isRequest(io.mem_grant.bits) grantack.bits := edge.GrantAck(io.mem_grant.bits) state := Mux(grant_had_data, s_drain_rpq_loads, s_drain_rpq) assert(!(!grant_had_data && req_needs_wb)) commit_line := false.B new_coh := coh_on_grant } } .elsewhen (state === s_drain_rpq_loads) { val drain_load = (isRead(rpq.io.deq.bits.uop.mem_cmd) && !isWrite(rpq.io.deq.bits.uop.mem_cmd) && (rpq.io.deq.bits.uop.mem_cmd =/= M_XLR)) // LR should go through replay // drain all loads for now val rp_addr = Cat(req_tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0)) val word_idx = if (rowWords == 1) 0.U else rp_addr(log2Up(rowWords*coreDataBytes)-1, log2Up(wordBytes)) val data = io.lb_resp val data_word = data >> Cat(word_idx, 0.U(log2Up(coreDataBits).W)) val loadgen = new LoadGen(rpq.io.deq.bits.uop.mem_size, rpq.io.deq.bits.uop.mem_signed, Cat(req_tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0)), data_word, false.B, wordBytes) rpq.io.deq.ready := io.resp.ready && io.lb_read.ready && drain_load io.lb_read.valid := rpq.io.deq.valid && drain_load io.lb_read.bits.id := io.id io.lb_read.bits.offset := rpq.io.deq.bits.addr >> rowOffBits io.resp.valid := rpq.io.deq.valid && io.lb_read.fire && drain_load io.resp.bits.uop := rpq.io.deq.bits.uop io.resp.bits.data := loadgen.data io.resp.bits.is_hella := rpq.io.deq.bits.is_hella when (rpq.io.deq.fire) { commit_line := true.B } .elsewhen (rpq.io.empty && !commit_line) { when (!rpq.io.enq.fire) { state := s_mem_finish_1 finish_to_prefetch := enablePrefetching.B } } .elsewhen (rpq.io.empty || (rpq.io.deq.valid && !drain_load)) { // 
io.commit_val is for the prefetcher. it tells the prefetcher that this line was correctly acquired // The prefetcher should consider fetching the next line io.commit_val := true.B state := s_meta_read } } .elsewhen (state === s_meta_read) { io.meta_read.valid := !io.prober_state.valid || !grantack.valid || (io.prober_state.bits(untagBits-1,blockOffBits) =/= req_idx) io.meta_read.bits.idx := req_idx io.meta_read.bits.tag := req_tag io.meta_read.bits.way_en := req.way_en when (io.meta_read.fire) { state := s_meta_resp_1 } } .elsewhen (state === s_meta_resp_1) { state := s_meta_resp_2 } .elsewhen (state === s_meta_resp_2) { val needs_wb = io.meta_resp.bits.coh.onCacheControl(M_FLUSH)._1 state := Mux(!io.meta_resp.valid, s_meta_read, // Prober could have nack'd this read Mux(needs_wb, s_meta_clear, s_commit_line)) } .elsewhen (state === s_meta_clear) { io.meta_write.valid := true.B io.meta_write.bits.idx := req_idx io.meta_write.bits.data.coh := coh_on_clear io.meta_write.bits.data.tag := req_tag io.meta_write.bits.way_en := req.way_en when (io.meta_write.fire) { state := s_wb_req } } .elsewhen (state === s_wb_req) { io.wb_req.valid := true.B io.wb_req.bits.tag := req.old_meta.tag io.wb_req.bits.idx := req_idx io.wb_req.bits.param := shrink_param io.wb_req.bits.way_en := req.way_en io.wb_req.bits.source := io.id io.wb_req.bits.voluntary := true.B when (io.wb_req.fire) { state := s_wb_resp } } .elsewhen (state === s_wb_resp) { when (io.wb_resp) { state := s_commit_line } } .elsewhen (state === s_commit_line) { io.lb_read.valid := true.B io.lb_read.bits.id := io.id io.lb_read.bits.offset := refill_ctr io.refill.valid := io.lb_read.fire io.refill.bits.addr := req_block_addr | (refill_ctr << rowOffBits) io.refill.bits.way_en := req.way_en io.refill.bits.wmask := ~(0.U(rowWords.W)) io.refill.bits.data := io.lb_resp when (io.refill.fire) { refill_ctr := refill_ctr + 1.U when (refill_ctr === (cacheDataBeats - 1).U) { state := s_drain_rpq } } } .elsewhen (state === s_drain_rpq) { io.replay <> rpq.io.deq io.replay.bits.way_en := req.way_en io.replay.bits.addr := Cat(req_tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0)) when (io.replay.fire && isWrite(rpq.io.deq.bits.uop.mem_cmd)) { // Set dirty bit val (is_hit, _, coh_on_hit) = new_coh.onAccess(rpq.io.deq.bits.uop.mem_cmd) assert(is_hit, "We still don't have permissions for this store") new_coh := coh_on_hit } when (rpq.io.empty && !rpq.io.enq.valid) { state := s_meta_write_req } } .elsewhen (state === s_meta_write_req) { io.meta_write.valid := true.B io.meta_write.bits.idx := req_idx io.meta_write.bits.data.coh := new_coh io.meta_write.bits.data.tag := req_tag io.meta_write.bits.way_en := req.way_en when (io.meta_write.fire) { state := s_mem_finish_1 finish_to_prefetch := false.B } } .elsewhen (state === s_mem_finish_1) { io.mem_finish.valid := grantack.valid io.mem_finish.bits := grantack.bits when (io.mem_finish.fire || !grantack.valid) { grantack.valid := false.B state := s_mem_finish_2 } } .elsewhen (state === s_mem_finish_2) { state := Mux(finish_to_prefetch, s_prefetch, s_invalid) } .elsewhen (state === s_prefetch) { io.req_pri_rdy := true.B when ((io.req_sec_val && !io.req_sec_rdy) || io.clear_prefetch) { state := s_invalid } .elsewhen (io.req_sec_val && io.req_sec_rdy) { val (is_hit, _, coh_on_hit) = new_coh.onAccess(io.req.uop.mem_cmd) when (is_hit) { // Proceed with refill new_coh := coh_on_hit state := s_meta_read } .otherwise { // Reacquire this line new_coh := ClientMetadata.onReset state := s_refill_req } } .elsewhen (io.req_pri_val 
&& io.req_pri_rdy) { grant_had_data := false.B state := handle_pri_req(state) } } } class BoomIOMSHR(id: Int)(implicit edge: TLEdgeOut, p: Parameters) extends BoomModule()(p) with HasL1HellaCacheParameters { val io = IO(new Bundle { val req = Flipped(Decoupled(new BoomDCacheReq)) val resp = Decoupled(new BoomDCacheResp) val mem_access = Decoupled(new TLBundleA(edge.bundle)) val mem_ack = Flipped(Valid(new TLBundleD(edge.bundle))) // We don't need brupdate in here because uncacheable operations are guaranteed non-speculative }) def beatOffset(addr: UInt) = addr.extract(beatOffBits-1, wordOffBits) def wordFromBeat(addr: UInt, dat: UInt) = { val shift = Cat(beatOffset(addr), 0.U((wordOffBits+log2Ceil(wordBytes)).W)) (dat >> shift)(wordBits-1, 0) } val req = Reg(new BoomDCacheReq) val grant_word = Reg(UInt(wordBits.W)) val s_idle :: s_mem_access :: s_mem_ack :: s_resp :: Nil = Enum(4) val state = RegInit(s_idle) io.req.ready := state === s_idle val loadgen = new LoadGen(req.uop.mem_size, req.uop.mem_signed, req.addr, grant_word, false.B, wordBytes) val a_source = id.U val a_address = req.addr val a_size = req.uop.mem_size val a_data = Fill(beatWords, req.data) val get = edge.Get(a_source, a_address, a_size)._2 val put = edge.Put(a_source, a_address, a_size, a_data)._2 val atomics = if (edge.manager.anySupportLogical) { MuxLookup(req.uop.mem_cmd, (0.U).asTypeOf(new TLBundleA(edge.bundle)))(Array( M_XA_SWAP -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.SWAP)._2, M_XA_XOR -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.XOR) ._2, M_XA_OR -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.OR) ._2, M_XA_AND -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.AND) ._2, M_XA_ADD -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.ADD)._2, M_XA_MIN -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MIN)._2, M_XA_MAX -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAX)._2, M_XA_MINU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MINU)._2, M_XA_MAXU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAXU)._2)) } else { // If no managers support atomics, assert fail if processor asks for them assert(state === s_idle || !isAMO(req.uop.mem_cmd)) (0.U).asTypeOf(new TLBundleA(edge.bundle)) } assert(state === s_idle || req.uop.mem_cmd =/= M_XSC) io.mem_access.valid := state === s_mem_access io.mem_access.bits := Mux(isAMO(req.uop.mem_cmd), atomics, Mux(isRead(req.uop.mem_cmd), get, put)) val send_resp = isRead(req.uop.mem_cmd) io.resp.valid := (state === s_resp) && send_resp io.resp.bits.is_hella := req.is_hella io.resp.bits.uop := req.uop io.resp.bits.data := loadgen.data when (io.req.fire) { req := io.req.bits state := s_mem_access } when (io.mem_access.fire) { state := s_mem_ack } when (state === s_mem_ack && io.mem_ack.valid) { state := s_resp when (isRead(req.uop.mem_cmd)) { grant_word := wordFromBeat(req.addr, io.mem_ack.bits.data) } } when (state === s_resp) { when (!send_resp || io.resp.fire) { state := s_idle } } } class LineBufferReadReq(implicit p: Parameters) extends BoomBundle()(p) with HasL1HellaCacheParameters { val id = UInt(log2Ceil(nLBEntries).W) val offset = UInt(log2Ceil(cacheDataBeats).W) def lb_addr = Cat(id, offset) } class LineBufferWriteReq(implicit p: Parameters) extends LineBufferReadReq()(p) { val data = UInt(encRowBits.W) } class LineBufferMetaWriteReq(implicit p: Parameters) extends BoomBundle()(p) { val id = UInt(log2Ceil(nLBEntries).W) val coh = 
new ClientMetadata val addr = UInt(coreMaxAddrBits.W) } class LineBufferMeta(implicit p: Parameters) extends BoomBundle()(p) with HasL1HellaCacheParameters { val coh = new ClientMetadata val addr = UInt(coreMaxAddrBits.W) } class BoomMSHRFile(implicit edge: TLEdgeOut, p: Parameters) extends BoomModule()(p) with HasL1HellaCacheParameters { val io = IO(new Bundle { val req = Flipped(Vec(memWidth, Decoupled(new BoomDCacheReqInternal))) // Req from s2 of DCache pipe val req_is_probe = Input(Vec(memWidth, Bool())) val resp = Decoupled(new BoomDCacheResp) val secondary_miss = Output(Vec(memWidth, Bool())) val block_hit = Output(Vec(memWidth, Bool())) val brupdate = Input(new BrUpdateInfo) val exception = Input(Bool()) val rob_pnr_idx = Input(UInt(robAddrSz.W)) val rob_head_idx = Input(UInt(robAddrSz.W)) val mem_acquire = Decoupled(new TLBundleA(edge.bundle)) val mem_grant = Flipped(Decoupled(new TLBundleD(edge.bundle))) val mem_finish = Decoupled(new TLBundleE(edge.bundle)) val refill = Decoupled(new L1DataWriteReq) val meta_write = Decoupled(new L1MetaWriteReq) val meta_read = Decoupled(new L1MetaReadReq) val meta_resp = Input(Valid(new L1Metadata)) val replay = Decoupled(new BoomDCacheReqInternal) val prefetch = Decoupled(new BoomDCacheReq) val wb_req = Decoupled(new WritebackReq(edge.bundle)) val prober_state = Input(Valid(UInt(coreMaxAddrBits.W))) val clear_all = Input(Bool()) // Clears all uncommitted MSHRs to prepare for fence val wb_resp = Input(Bool()) val fence_rdy = Output(Bool()) val probe_rdy = Output(Bool()) }) val req_idx = OHToUInt(io.req.map(_.valid)) val req = io.req(req_idx) val req_is_probe = io.req_is_probe(0) for (w <- 0 until memWidth) io.req(w).ready := false.B val prefetcher: DataPrefetcher = if (enablePrefetching) Module(new NLPrefetcher) else Module(new NullPrefetcher) io.prefetch <> prefetcher.io.prefetch val cacheable = edge.manager.supportsAcquireBFast(req.bits.addr, lgCacheBlockBytes.U) // -------------------- // The MSHR SDQ val sdq_val = RegInit(0.U(cfg.nSDQ.W)) val sdq_alloc_id = PriorityEncoder(~sdq_val(cfg.nSDQ-1,0)) val sdq_rdy = !sdq_val.andR val sdq_enq = req.fire && cacheable && isWrite(req.bits.uop.mem_cmd) val sdq = Mem(cfg.nSDQ, UInt(coreDataBits.W)) when (sdq_enq) { sdq(sdq_alloc_id) := req.bits.data } // -------------------- // The LineBuffer Data // Holds refilling lines, prefetched lines val lb = Mem(nLBEntries * cacheDataBeats, UInt(encRowBits.W)) val lb_read_arb = Module(new Arbiter(new LineBufferReadReq, cfg.nMSHRs)) val lb_write_arb = Module(new Arbiter(new LineBufferWriteReq, cfg.nMSHRs)) lb_read_arb.io.out.ready := false.B lb_write_arb.io.out.ready := true.B val lb_read_data = WireInit(0.U(encRowBits.W)) when (lb_write_arb.io.out.fire) { lb.write(lb_write_arb.io.out.bits.lb_addr, lb_write_arb.io.out.bits.data) } .otherwise { lb_read_arb.io.out.ready := true.B when (lb_read_arb.io.out.fire) { lb_read_data := lb.read(lb_read_arb.io.out.bits.lb_addr) } } def widthMap[T <: Data](f: Int => T) = VecInit((0 until memWidth).map(f)) val idx_matches = Wire(Vec(memWidth, Vec(cfg.nMSHRs, Bool()))) val tag_matches = Wire(Vec(memWidth, Vec(cfg.nMSHRs, Bool()))) val way_matches = Wire(Vec(memWidth, Vec(cfg.nMSHRs, Bool()))) val tag_match = widthMap(w => Mux1H(idx_matches(w), tag_matches(w))) val idx_match = widthMap(w => idx_matches(w).reduce(_||_)) val way_match = widthMap(w => Mux1H(idx_matches(w), way_matches(w))) val wb_tag_list = Wire(Vec(cfg.nMSHRs, UInt(tagBits.W))) val meta_write_arb = Module(new Arbiter(new L1MetaWriteReq , cfg.nMSHRs)) val 
meta_read_arb = Module(new Arbiter(new L1MetaReadReq , cfg.nMSHRs)) val wb_req_arb = Module(new Arbiter(new WritebackReq(edge.bundle), cfg.nMSHRs)) val replay_arb = Module(new Arbiter(new BoomDCacheReqInternal , cfg.nMSHRs)) val resp_arb = Module(new Arbiter(new BoomDCacheResp , cfg.nMSHRs + nIOMSHRs)) val refill_arb = Module(new Arbiter(new L1DataWriteReq , cfg.nMSHRs)) val commit_vals = Wire(Vec(cfg.nMSHRs, Bool())) val commit_addrs = Wire(Vec(cfg.nMSHRs, UInt(coreMaxAddrBits.W))) val commit_cohs = Wire(Vec(cfg.nMSHRs, new ClientMetadata)) var sec_rdy = false.B io.fence_rdy := true.B io.probe_rdy := true.B io.mem_grant.ready := false.B val mshr_alloc_idx = Wire(UInt()) val pri_rdy = WireInit(false.B) val pri_val = req.valid && sdq_rdy && cacheable && !idx_match(req_idx) val mshrs = (0 until cfg.nMSHRs) map { i => val mshr = Module(new BoomMSHR) mshr.io.id := i.U(log2Ceil(cfg.nMSHRs).W) for (w <- 0 until memWidth) { idx_matches(w)(i) := mshr.io.idx.valid && mshr.io.idx.bits === io.req(w).bits.addr(untagBits-1,blockOffBits) tag_matches(w)(i) := mshr.io.tag.valid && mshr.io.tag.bits === io.req(w).bits.addr >> untagBits way_matches(w)(i) := mshr.io.way.valid && mshr.io.way.bits === io.req(w).bits.way_en } wb_tag_list(i) := mshr.io.wb_req.bits.tag mshr.io.req_pri_val := (i.U === mshr_alloc_idx) && pri_val when (i.U === mshr_alloc_idx) { pri_rdy := mshr.io.req_pri_rdy } mshr.io.req_sec_val := req.valid && sdq_rdy && tag_match(req_idx) && idx_matches(req_idx)(i) && cacheable mshr.io.req := req.bits mshr.io.req_is_probe := req_is_probe mshr.io.req.sdq_id := sdq_alloc_id // Clear because of a FENCE, a request to the same idx as a prefetched line, // a probe to that prefetched line, all mshrs are in use mshr.io.clear_prefetch := ((io.clear_all && !req.valid)|| (req.valid && idx_matches(req_idx)(i) && cacheable && !tag_match(req_idx)) || (req_is_probe && idx_matches(req_idx)(i))) mshr.io.brupdate := io.brupdate mshr.io.exception := io.exception mshr.io.rob_pnr_idx := io.rob_pnr_idx mshr.io.rob_head_idx := io.rob_head_idx mshr.io.prober_state := io.prober_state mshr.io.wb_resp := io.wb_resp meta_write_arb.io.in(i) <> mshr.io.meta_write meta_read_arb.io.in(i) <> mshr.io.meta_read mshr.io.meta_resp := io.meta_resp wb_req_arb.io.in(i) <> mshr.io.wb_req replay_arb.io.in(i) <> mshr.io.replay refill_arb.io.in(i) <> mshr.io.refill lb_read_arb.io.in(i) <> mshr.io.lb_read mshr.io.lb_resp := lb_read_data lb_write_arb.io.in(i) <> mshr.io.lb_write commit_vals(i) := mshr.io.commit_val commit_addrs(i) := mshr.io.commit_addr commit_cohs(i) := mshr.io.commit_coh mshr.io.mem_grant.valid := false.B mshr.io.mem_grant.bits := DontCare when (io.mem_grant.bits.source === i.U) { mshr.io.mem_grant <> io.mem_grant } sec_rdy = sec_rdy || (mshr.io.req_sec_rdy && mshr.io.req_sec_val) resp_arb.io.in(i) <> mshr.io.resp when (!mshr.io.req_pri_rdy) { io.fence_rdy := false.B } for (w <- 0 until memWidth) { when (!mshr.io.probe_rdy && idx_matches(w)(i) && io.req_is_probe(w)) { io.probe_rdy := false.B } } mshr } // Try to round-robin the MSHRs val mshr_head = RegInit(0.U(log2Ceil(cfg.nMSHRs).W)) mshr_alloc_idx := RegNext(AgePriorityEncoder(mshrs.map(m=>m.io.req_pri_rdy), mshr_head)) when (pri_rdy && pri_val) { mshr_head := WrapInc(mshr_head, cfg.nMSHRs) } io.meta_write <> meta_write_arb.io.out io.meta_read <> meta_read_arb.io.out io.wb_req <> wb_req_arb.io.out val mmio_alloc_arb = Module(new Arbiter(Bool(), nIOMSHRs)) var mmio_rdy = false.B val mmios = (0 until nIOMSHRs) map { i => val id = cfg.nMSHRs + 1 + i // +1 for wb unit val 
mshr = Module(new BoomIOMSHR(id)) mmio_alloc_arb.io.in(i).valid := mshr.io.req.ready mmio_alloc_arb.io.in(i).bits := DontCare mshr.io.req.valid := mmio_alloc_arb.io.in(i).ready mshr.io.req.bits := req.bits mmio_rdy = mmio_rdy || mshr.io.req.ready mshr.io.mem_ack.bits := io.mem_grant.bits mshr.io.mem_ack.valid := io.mem_grant.valid && io.mem_grant.bits.source === id.U when (io.mem_grant.bits.source === id.U) { io.mem_grant.ready := true.B } resp_arb.io.in(cfg.nMSHRs + i) <> mshr.io.resp when (!mshr.io.req.ready) { io.fence_rdy := false.B } mshr } mmio_alloc_arb.io.out.ready := req.valid && !cacheable TLArbiter.lowestFromSeq(edge, io.mem_acquire, mshrs.map(_.io.mem_acquire) ++ mmios.map(_.io.mem_access)) TLArbiter.lowestFromSeq(edge, io.mem_finish, mshrs.map(_.io.mem_finish)) val respq = Module(new BranchKillableQueue(new BoomDCacheResp, 4, u => u.uses_ldq, flow = false)) respq.io.brupdate := io.brupdate respq.io.flush := io.exception respq.io.enq <> resp_arb.io.out io.resp <> respq.io.deq for (w <- 0 until memWidth) { io.req(w).ready := (w.U === req_idx) && Mux(!cacheable, mmio_rdy, sdq_rdy && Mux(idx_match(w), tag_match(w) && sec_rdy, pri_rdy)) io.secondary_miss(w) := idx_match(w) && way_match(w) && !tag_match(w) io.block_hit(w) := idx_match(w) && tag_match(w) } io.refill <> refill_arb.io.out val free_sdq = io.replay.fire && isWrite(io.replay.bits.uop.mem_cmd) io.replay <> replay_arb.io.out io.replay.bits.data := sdq(replay_arb.io.out.bits.sdq_id) when (io.replay.valid || sdq_enq) { sdq_val := sdq_val & ~(UIntToOH(replay_arb.io.out.bits.sdq_id) & Fill(cfg.nSDQ, free_sdq)) | PriorityEncoderOH(~sdq_val(cfg.nSDQ-1,0)) & Fill(cfg.nSDQ, sdq_enq) } prefetcher.io.mshr_avail := RegNext(pri_rdy) prefetcher.io.req_val := RegNext(commit_vals.reduce(_||_)) prefetcher.io.req_addr := RegNext(Mux1H(commit_vals, commit_addrs)) prefetcher.io.req_coh := RegNext(Mux1H(commit_vals, commit_cohs)) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? 
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC 
=> mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? 
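// (Put/AMO writes, PREFETCH_WRITE hints, and Acquires growing to Trunk (NtoT/BtoT)
// need T permissions; Gets, PREFETCH_READ hints, and NtoB Acquires do not.)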
def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size 
:= lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = 
Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address 
:= toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def 
Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := 
mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } } File AMOALU.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. 
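// StoreGen builds per-byte masks and replicated write data for sub-word stores,
// LoadGen shifts a sub-word load result to the LSBs and sign- or zero-extends it,
// and AMOALU computes the RISC-V AMO operations (add, and/or/xor, signed/unsigned
// min/max) and merges the result with io.lhs under the byte-lane mask.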
package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters class StoreGen(typ: UInt, addr: UInt, dat: UInt, maxSize: Int) { val size = Wire(UInt(log2Up(log2Up(maxSize)+1).W)) size := typ val dat_padded = dat.pad(maxSize*8) def misaligned: Bool = (addr & ((1.U << size) - 1.U)(log2Up(maxSize)-1,0)).orR def mask = { var res = 1.U for (i <- 0 until log2Up(maxSize)) { val upper = Mux(addr(i), res, 0.U) | Mux(size >= (i+1).U, ((BigInt(1) << (1 << i))-1).U, 0.U) val lower = Mux(addr(i), 0.U, res) res = Cat(upper, lower) } res } protected def genData(i: Int): UInt = if (i >= log2Up(maxSize)) dat_padded else Mux(size === i.U, Fill(1 << (log2Up(maxSize)-i), dat_padded((8 << i)-1,0)), genData(i+1)) def data = genData(0) def wordData = genData(2) } class LoadGen(typ: UInt, signed: Bool, addr: UInt, dat: UInt, zero: Bool, maxSize: Int) { private val size = new StoreGen(typ, addr, dat, maxSize).size private def genData(logMinSize: Int): UInt = { var res = dat for (i <- log2Up(maxSize)-1 to logMinSize by -1) { val pos = 8 << i val shifted = Mux(addr(i), res(2*pos-1,pos), res(pos-1,0)) val doZero = (i == 0).B && zero val zeroed = Mux(doZero, 0.U, shifted) res = Cat(Mux(size === i.U || doZero, Fill(8*maxSize-pos, signed && zeroed(pos-1)), res(8*maxSize-1,pos)), zeroed) } res } def wordData = genData(2) def data = genData(0) } class AMOALU(operandBits: Int)(implicit p: Parameters) extends Module { val minXLen = 32 val widths = (0 to log2Ceil(operandBits / minXLen)).map(minXLen << _) val io = IO(new Bundle { val mask = Input(UInt((operandBits / 8).W)) val cmd = Input(UInt(M_SZ.W)) val lhs = Input(UInt(operandBits.W)) val rhs = Input(UInt(operandBits.W)) val out = Output(UInt(operandBits.W)) val out_unmasked = Output(UInt(operandBits.W)) }) val max = io.cmd === M_XA_MAX || io.cmd === M_XA_MAXU val min = io.cmd === M_XA_MIN || io.cmd === M_XA_MINU val add = io.cmd === M_XA_ADD val logic_and = io.cmd === M_XA_OR || io.cmd === M_XA_AND val logic_xor = io.cmd === M_XA_XOR || io.cmd === M_XA_OR val adder_out = { // partition the carry chain to support sub-xLen addition val mask = ~(0.U(operandBits.W) +: widths.init.map(w => !io.mask(w/8-1) << (w-1))).reduce(_|_) (io.lhs & mask) + (io.rhs & mask) } val less = { // break up the comparator so the lower parts will be CSE'd def isLessUnsigned(x: UInt, y: UInt, n: Int): Bool = { if (n == minXLen) x(n-1, 0) < y(n-1, 0) else x(n-1, n/2) < y(n-1, n/2) || x(n-1, n/2) === y(n-1, n/2) && isLessUnsigned(x, y, n/2) } def isLess(x: UInt, y: UInt, n: Int): Bool = { val signed = { val mask = M_XA_MIN ^ M_XA_MINU (io.cmd & mask) === (M_XA_MIN & mask) } Mux(x(n-1) === y(n-1), isLessUnsigned(x, y, n), Mux(signed, x(n-1), y(n-1))) } PriorityMux(widths.reverse.map(w => (io.mask(w/8/2), isLess(io.lhs, io.rhs, w)))) } val minmax = Mux(Mux(less, min, max), io.lhs, io.rhs) val logic = Mux(logic_and, io.lhs & io.rhs, 0.U) | Mux(logic_xor, io.lhs ^ io.rhs, 0.U) val out = Mux(add, adder_out, Mux(logic_and || logic_xor, logic, minmax)) val wmask = FillInterleaved(8, io.mask) io.out := wmask & out | ~wmask & io.lhs io.out_unmasked := out }
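For reference, here is a minimal illustrative sketch (not one of the files above, and not part of the design being elaborated) of how the LoadGen helper is typically used: given a size, signedness, address, and one 64-bit beat, it shifts the addressed sub-word down to the LSBs and sign- or zero-extends it. This mirrors how BoomIOMSHR above turns io.mem_ack.bits.data into io.resp.bits.data via wordFromBeat and loadgen.data. The wrapper name and port widths below are assumptions chosen for the example; it presumes only chisel3 and the LoadGen class from AMOALU.scala on the classpath.

import chisel3._
import freechips.rocketchip.rocket.LoadGen

class LoadGenExample extends Module {
  val io = IO(new Bundle {
    val size   = Input(UInt(2.W))   // log2(bytes): 0 = byte, 1 = half, 2 = word, 3 = double
    val signed = Input(Bool())
    val addr   = Input(UInt(8.W))   // only the low log2(maxSize) bits select the sub-word
    val beat   = Input(UInt(64.W))  // one refill/ack beat, e.g. io.mem_ack.bits.data
    val data   = Output(UInt(64.W))
  })
  // maxSize = 8 bytes: LoadGen extracts the addressed sub-word from the beat,
  // places it in the LSBs, and sign- or zero-extends it to the full 64 bits.
  val loadgen = new LoadGen(io.size, io.signed, io.addr, io.beat, false.B, 8)
  io.data := loadgen.data
}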
module BoomIOMSHR_1( // @[mshrs.scala:402:7] input clock, // @[mshrs.scala:402:7] input reset, // @[mshrs.scala:402:7] output io_req_ready, // @[mshrs.scala:405:14] input io_req_valid, // @[mshrs.scala:405:14] input [6:0] io_req_bits_uop_uopc, // @[mshrs.scala:405:14] input [31:0] io_req_bits_uop_inst, // @[mshrs.scala:405:14] input [31:0] io_req_bits_uop_debug_inst, // @[mshrs.scala:405:14] input io_req_bits_uop_is_rvc, // @[mshrs.scala:405:14] input [39:0] io_req_bits_uop_debug_pc, // @[mshrs.scala:405:14] input [2:0] io_req_bits_uop_iq_type, // @[mshrs.scala:405:14] input [9:0] io_req_bits_uop_fu_code, // @[mshrs.scala:405:14] input [3:0] io_req_bits_uop_ctrl_br_type, // @[mshrs.scala:405:14] input [1:0] io_req_bits_uop_ctrl_op1_sel, // @[mshrs.scala:405:14] input [2:0] io_req_bits_uop_ctrl_op2_sel, // @[mshrs.scala:405:14] input [2:0] io_req_bits_uop_ctrl_imm_sel, // @[mshrs.scala:405:14] input [4:0] io_req_bits_uop_ctrl_op_fcn, // @[mshrs.scala:405:14] input io_req_bits_uop_ctrl_fcn_dw, // @[mshrs.scala:405:14] input [2:0] io_req_bits_uop_ctrl_csr_cmd, // @[mshrs.scala:405:14] input io_req_bits_uop_ctrl_is_load, // @[mshrs.scala:405:14] input io_req_bits_uop_ctrl_is_sta, // @[mshrs.scala:405:14] input io_req_bits_uop_ctrl_is_std, // @[mshrs.scala:405:14] input [1:0] io_req_bits_uop_iw_state, // @[mshrs.scala:405:14] input io_req_bits_uop_iw_p1_poisoned, // @[mshrs.scala:405:14] input io_req_bits_uop_iw_p2_poisoned, // @[mshrs.scala:405:14] input io_req_bits_uop_is_br, // @[mshrs.scala:405:14] input io_req_bits_uop_is_jalr, // @[mshrs.scala:405:14] input io_req_bits_uop_is_jal, // @[mshrs.scala:405:14] input io_req_bits_uop_is_sfb, // @[mshrs.scala:405:14] input [15:0] io_req_bits_uop_br_mask, // @[mshrs.scala:405:14] input [3:0] io_req_bits_uop_br_tag, // @[mshrs.scala:405:14] input [4:0] io_req_bits_uop_ftq_idx, // @[mshrs.scala:405:14] input io_req_bits_uop_edge_inst, // @[mshrs.scala:405:14] input [5:0] io_req_bits_uop_pc_lob, // @[mshrs.scala:405:14] input io_req_bits_uop_taken, // @[mshrs.scala:405:14] input [19:0] io_req_bits_uop_imm_packed, // @[mshrs.scala:405:14] input [11:0] io_req_bits_uop_csr_addr, // @[mshrs.scala:405:14] input [6:0] io_req_bits_uop_rob_idx, // @[mshrs.scala:405:14] input [4:0] io_req_bits_uop_ldq_idx, // @[mshrs.scala:405:14] input [4:0] io_req_bits_uop_stq_idx, // @[mshrs.scala:405:14] input [1:0] io_req_bits_uop_rxq_idx, // @[mshrs.scala:405:14] input [6:0] io_req_bits_uop_pdst, // @[mshrs.scala:405:14] input [6:0] io_req_bits_uop_prs1, // @[mshrs.scala:405:14] input [6:0] io_req_bits_uop_prs2, // @[mshrs.scala:405:14] input [6:0] io_req_bits_uop_prs3, // @[mshrs.scala:405:14] input [4:0] io_req_bits_uop_ppred, // @[mshrs.scala:405:14] input io_req_bits_uop_prs1_busy, // @[mshrs.scala:405:14] input io_req_bits_uop_prs2_busy, // @[mshrs.scala:405:14] input io_req_bits_uop_prs3_busy, // @[mshrs.scala:405:14] input io_req_bits_uop_ppred_busy, // @[mshrs.scala:405:14] input [6:0] io_req_bits_uop_stale_pdst, // @[mshrs.scala:405:14] input io_req_bits_uop_exception, // @[mshrs.scala:405:14] input [63:0] io_req_bits_uop_exc_cause, // @[mshrs.scala:405:14] input io_req_bits_uop_bypassable, // @[mshrs.scala:405:14] input [4:0] io_req_bits_uop_mem_cmd, // @[mshrs.scala:405:14] input [1:0] io_req_bits_uop_mem_size, // @[mshrs.scala:405:14] input io_req_bits_uop_mem_signed, // @[mshrs.scala:405:14] input io_req_bits_uop_is_fence, // @[mshrs.scala:405:14] input io_req_bits_uop_is_fencei, // @[mshrs.scala:405:14] input io_req_bits_uop_is_amo, // 
@[mshrs.scala:405:14] input io_req_bits_uop_uses_ldq, // @[mshrs.scala:405:14] input io_req_bits_uop_uses_stq, // @[mshrs.scala:405:14] input io_req_bits_uop_is_sys_pc2epc, // @[mshrs.scala:405:14] input io_req_bits_uop_is_unique, // @[mshrs.scala:405:14] input io_req_bits_uop_flush_on_commit, // @[mshrs.scala:405:14] input io_req_bits_uop_ldst_is_rs1, // @[mshrs.scala:405:14] input [5:0] io_req_bits_uop_ldst, // @[mshrs.scala:405:14] input [5:0] io_req_bits_uop_lrs1, // @[mshrs.scala:405:14] input [5:0] io_req_bits_uop_lrs2, // @[mshrs.scala:405:14] input [5:0] io_req_bits_uop_lrs3, // @[mshrs.scala:405:14] input io_req_bits_uop_ldst_val, // @[mshrs.scala:405:14] input [1:0] io_req_bits_uop_dst_rtype, // @[mshrs.scala:405:14] input [1:0] io_req_bits_uop_lrs1_rtype, // @[mshrs.scala:405:14] input [1:0] io_req_bits_uop_lrs2_rtype, // @[mshrs.scala:405:14] input io_req_bits_uop_frs3_en, // @[mshrs.scala:405:14] input io_req_bits_uop_fp_val, // @[mshrs.scala:405:14] input io_req_bits_uop_fp_single, // @[mshrs.scala:405:14] input io_req_bits_uop_xcpt_pf_if, // @[mshrs.scala:405:14] input io_req_bits_uop_xcpt_ae_if, // @[mshrs.scala:405:14] input io_req_bits_uop_xcpt_ma_if, // @[mshrs.scala:405:14] input io_req_bits_uop_bp_debug_if, // @[mshrs.scala:405:14] input io_req_bits_uop_bp_xcpt_if, // @[mshrs.scala:405:14] input [1:0] io_req_bits_uop_debug_fsrc, // @[mshrs.scala:405:14] input [1:0] io_req_bits_uop_debug_tsrc, // @[mshrs.scala:405:14] input [39:0] io_req_bits_addr, // @[mshrs.scala:405:14] input [63:0] io_req_bits_data, // @[mshrs.scala:405:14] input io_req_bits_is_hella, // @[mshrs.scala:405:14] input io_resp_ready, // @[mshrs.scala:405:14] output io_resp_valid, // @[mshrs.scala:405:14] output [6:0] io_resp_bits_uop_uopc, // @[mshrs.scala:405:14] output [31:0] io_resp_bits_uop_inst, // @[mshrs.scala:405:14] output [31:0] io_resp_bits_uop_debug_inst, // @[mshrs.scala:405:14] output io_resp_bits_uop_is_rvc, // @[mshrs.scala:405:14] output [39:0] io_resp_bits_uop_debug_pc, // @[mshrs.scala:405:14] output [2:0] io_resp_bits_uop_iq_type, // @[mshrs.scala:405:14] output [9:0] io_resp_bits_uop_fu_code, // @[mshrs.scala:405:14] output [3:0] io_resp_bits_uop_ctrl_br_type, // @[mshrs.scala:405:14] output [1:0] io_resp_bits_uop_ctrl_op1_sel, // @[mshrs.scala:405:14] output [2:0] io_resp_bits_uop_ctrl_op2_sel, // @[mshrs.scala:405:14] output [2:0] io_resp_bits_uop_ctrl_imm_sel, // @[mshrs.scala:405:14] output [4:0] io_resp_bits_uop_ctrl_op_fcn, // @[mshrs.scala:405:14] output io_resp_bits_uop_ctrl_fcn_dw, // @[mshrs.scala:405:14] output [2:0] io_resp_bits_uop_ctrl_csr_cmd, // @[mshrs.scala:405:14] output io_resp_bits_uop_ctrl_is_load, // @[mshrs.scala:405:14] output io_resp_bits_uop_ctrl_is_sta, // @[mshrs.scala:405:14] output io_resp_bits_uop_ctrl_is_std, // @[mshrs.scala:405:14] output [1:0] io_resp_bits_uop_iw_state, // @[mshrs.scala:405:14] output io_resp_bits_uop_iw_p1_poisoned, // @[mshrs.scala:405:14] output io_resp_bits_uop_iw_p2_poisoned, // @[mshrs.scala:405:14] output io_resp_bits_uop_is_br, // @[mshrs.scala:405:14] output io_resp_bits_uop_is_jalr, // @[mshrs.scala:405:14] output io_resp_bits_uop_is_jal, // @[mshrs.scala:405:14] output io_resp_bits_uop_is_sfb, // @[mshrs.scala:405:14] output [15:0] io_resp_bits_uop_br_mask, // @[mshrs.scala:405:14] output [3:0] io_resp_bits_uop_br_tag, // @[mshrs.scala:405:14] output [4:0] io_resp_bits_uop_ftq_idx, // @[mshrs.scala:405:14] output io_resp_bits_uop_edge_inst, // @[mshrs.scala:405:14] output [5:0] io_resp_bits_uop_pc_lob, // 
@[mshrs.scala:405:14] output io_resp_bits_uop_taken, // @[mshrs.scala:405:14] output [19:0] io_resp_bits_uop_imm_packed, // @[mshrs.scala:405:14] output [11:0] io_resp_bits_uop_csr_addr, // @[mshrs.scala:405:14] output [6:0] io_resp_bits_uop_rob_idx, // @[mshrs.scala:405:14] output [4:0] io_resp_bits_uop_ldq_idx, // @[mshrs.scala:405:14] output [4:0] io_resp_bits_uop_stq_idx, // @[mshrs.scala:405:14] output [1:0] io_resp_bits_uop_rxq_idx, // @[mshrs.scala:405:14] output [6:0] io_resp_bits_uop_pdst, // @[mshrs.scala:405:14] output [6:0] io_resp_bits_uop_prs1, // @[mshrs.scala:405:14] output [6:0] io_resp_bits_uop_prs2, // @[mshrs.scala:405:14] output [6:0] io_resp_bits_uop_prs3, // @[mshrs.scala:405:14] output [4:0] io_resp_bits_uop_ppred, // @[mshrs.scala:405:14] output io_resp_bits_uop_prs1_busy, // @[mshrs.scala:405:14] output io_resp_bits_uop_prs2_busy, // @[mshrs.scala:405:14] output io_resp_bits_uop_prs3_busy, // @[mshrs.scala:405:14] output io_resp_bits_uop_ppred_busy, // @[mshrs.scala:405:14] output [6:0] io_resp_bits_uop_stale_pdst, // @[mshrs.scala:405:14] output io_resp_bits_uop_exception, // @[mshrs.scala:405:14] output [63:0] io_resp_bits_uop_exc_cause, // @[mshrs.scala:405:14] output io_resp_bits_uop_bypassable, // @[mshrs.scala:405:14] output [4:0] io_resp_bits_uop_mem_cmd, // @[mshrs.scala:405:14] output [1:0] io_resp_bits_uop_mem_size, // @[mshrs.scala:405:14] output io_resp_bits_uop_mem_signed, // @[mshrs.scala:405:14] output io_resp_bits_uop_is_fence, // @[mshrs.scala:405:14] output io_resp_bits_uop_is_fencei, // @[mshrs.scala:405:14] output io_resp_bits_uop_is_amo, // @[mshrs.scala:405:14] output io_resp_bits_uop_uses_ldq, // @[mshrs.scala:405:14] output io_resp_bits_uop_uses_stq, // @[mshrs.scala:405:14] output io_resp_bits_uop_is_sys_pc2epc, // @[mshrs.scala:405:14] output io_resp_bits_uop_is_unique, // @[mshrs.scala:405:14] output io_resp_bits_uop_flush_on_commit, // @[mshrs.scala:405:14] output io_resp_bits_uop_ldst_is_rs1, // @[mshrs.scala:405:14] output [5:0] io_resp_bits_uop_ldst, // @[mshrs.scala:405:14] output [5:0] io_resp_bits_uop_lrs1, // @[mshrs.scala:405:14] output [5:0] io_resp_bits_uop_lrs2, // @[mshrs.scala:405:14] output [5:0] io_resp_bits_uop_lrs3, // @[mshrs.scala:405:14] output io_resp_bits_uop_ldst_val, // @[mshrs.scala:405:14] output [1:0] io_resp_bits_uop_dst_rtype, // @[mshrs.scala:405:14] output [1:0] io_resp_bits_uop_lrs1_rtype, // @[mshrs.scala:405:14] output [1:0] io_resp_bits_uop_lrs2_rtype, // @[mshrs.scala:405:14] output io_resp_bits_uop_frs3_en, // @[mshrs.scala:405:14] output io_resp_bits_uop_fp_val, // @[mshrs.scala:405:14] output io_resp_bits_uop_fp_single, // @[mshrs.scala:405:14] output io_resp_bits_uop_xcpt_pf_if, // @[mshrs.scala:405:14] output io_resp_bits_uop_xcpt_ae_if, // @[mshrs.scala:405:14] output io_resp_bits_uop_xcpt_ma_if, // @[mshrs.scala:405:14] output io_resp_bits_uop_bp_debug_if, // @[mshrs.scala:405:14] output io_resp_bits_uop_bp_xcpt_if, // @[mshrs.scala:405:14] output [1:0] io_resp_bits_uop_debug_fsrc, // @[mshrs.scala:405:14] output [1:0] io_resp_bits_uop_debug_tsrc, // @[mshrs.scala:405:14] output [63:0] io_resp_bits_data, // @[mshrs.scala:405:14] output io_resp_bits_is_hella, // @[mshrs.scala:405:14] input io_mem_access_ready, // @[mshrs.scala:405:14] output io_mem_access_valid, // @[mshrs.scala:405:14] output [2:0] io_mem_access_bits_opcode, // @[mshrs.scala:405:14] output [2:0] io_mem_access_bits_param, // @[mshrs.scala:405:14] output [3:0] io_mem_access_bits_size, // @[mshrs.scala:405:14] output [2:0] 
io_mem_access_bits_source, // @[mshrs.scala:405:14] output [31:0] io_mem_access_bits_address, // @[mshrs.scala:405:14] output [15:0] io_mem_access_bits_mask, // @[mshrs.scala:405:14] output [127:0] io_mem_access_bits_data, // @[mshrs.scala:405:14] input io_mem_ack_valid, // @[mshrs.scala:405:14] input [2:0] io_mem_ack_bits_opcode, // @[mshrs.scala:405:14] input [1:0] io_mem_ack_bits_param, // @[mshrs.scala:405:14] input [3:0] io_mem_ack_bits_size, // @[mshrs.scala:405:14] input [2:0] io_mem_ack_bits_source, // @[mshrs.scala:405:14] input [3:0] io_mem_ack_bits_sink, // @[mshrs.scala:405:14] input io_mem_ack_bits_denied, // @[mshrs.scala:405:14] input [127:0] io_mem_ack_bits_data, // @[mshrs.scala:405:14] input io_mem_ack_bits_corrupt // @[mshrs.scala:405:14] ); wire io_req_valid_0 = io_req_valid; // @[mshrs.scala:402:7] wire [6:0] io_req_bits_uop_uopc_0 = io_req_bits_uop_uopc; // @[mshrs.scala:402:7] wire [31:0] io_req_bits_uop_inst_0 = io_req_bits_uop_inst; // @[mshrs.scala:402:7] wire [31:0] io_req_bits_uop_debug_inst_0 = io_req_bits_uop_debug_inst; // @[mshrs.scala:402:7] wire io_req_bits_uop_is_rvc_0 = io_req_bits_uop_is_rvc; // @[mshrs.scala:402:7] wire [39:0] io_req_bits_uop_debug_pc_0 = io_req_bits_uop_debug_pc; // @[mshrs.scala:402:7] wire [2:0] io_req_bits_uop_iq_type_0 = io_req_bits_uop_iq_type; // @[mshrs.scala:402:7] wire [9:0] io_req_bits_uop_fu_code_0 = io_req_bits_uop_fu_code; // @[mshrs.scala:402:7] wire [3:0] io_req_bits_uop_ctrl_br_type_0 = io_req_bits_uop_ctrl_br_type; // @[mshrs.scala:402:7] wire [1:0] io_req_bits_uop_ctrl_op1_sel_0 = io_req_bits_uop_ctrl_op1_sel; // @[mshrs.scala:402:7] wire [2:0] io_req_bits_uop_ctrl_op2_sel_0 = io_req_bits_uop_ctrl_op2_sel; // @[mshrs.scala:402:7] wire [2:0] io_req_bits_uop_ctrl_imm_sel_0 = io_req_bits_uop_ctrl_imm_sel; // @[mshrs.scala:402:7] wire [4:0] io_req_bits_uop_ctrl_op_fcn_0 = io_req_bits_uop_ctrl_op_fcn; // @[mshrs.scala:402:7] wire io_req_bits_uop_ctrl_fcn_dw_0 = io_req_bits_uop_ctrl_fcn_dw; // @[mshrs.scala:402:7] wire [2:0] io_req_bits_uop_ctrl_csr_cmd_0 = io_req_bits_uop_ctrl_csr_cmd; // @[mshrs.scala:402:7] wire io_req_bits_uop_ctrl_is_load_0 = io_req_bits_uop_ctrl_is_load; // @[mshrs.scala:402:7] wire io_req_bits_uop_ctrl_is_sta_0 = io_req_bits_uop_ctrl_is_sta; // @[mshrs.scala:402:7] wire io_req_bits_uop_ctrl_is_std_0 = io_req_bits_uop_ctrl_is_std; // @[mshrs.scala:402:7] wire [1:0] io_req_bits_uop_iw_state_0 = io_req_bits_uop_iw_state; // @[mshrs.scala:402:7] wire io_req_bits_uop_iw_p1_poisoned_0 = io_req_bits_uop_iw_p1_poisoned; // @[mshrs.scala:402:7] wire io_req_bits_uop_iw_p2_poisoned_0 = io_req_bits_uop_iw_p2_poisoned; // @[mshrs.scala:402:7] wire io_req_bits_uop_is_br_0 = io_req_bits_uop_is_br; // @[mshrs.scala:402:7] wire io_req_bits_uop_is_jalr_0 = io_req_bits_uop_is_jalr; // @[mshrs.scala:402:7] wire io_req_bits_uop_is_jal_0 = io_req_bits_uop_is_jal; // @[mshrs.scala:402:7] wire io_req_bits_uop_is_sfb_0 = io_req_bits_uop_is_sfb; // @[mshrs.scala:402:7] wire [15:0] io_req_bits_uop_br_mask_0 = io_req_bits_uop_br_mask; // @[mshrs.scala:402:7] wire [3:0] io_req_bits_uop_br_tag_0 = io_req_bits_uop_br_tag; // @[mshrs.scala:402:7] wire [4:0] io_req_bits_uop_ftq_idx_0 = io_req_bits_uop_ftq_idx; // @[mshrs.scala:402:7] wire io_req_bits_uop_edge_inst_0 = io_req_bits_uop_edge_inst; // @[mshrs.scala:402:7] wire [5:0] io_req_bits_uop_pc_lob_0 = io_req_bits_uop_pc_lob; // @[mshrs.scala:402:7] wire io_req_bits_uop_taken_0 = io_req_bits_uop_taken; // @[mshrs.scala:402:7] wire [19:0] io_req_bits_uop_imm_packed_0 = 
io_req_bits_uop_imm_packed; // @[mshrs.scala:402:7] wire [11:0] io_req_bits_uop_csr_addr_0 = io_req_bits_uop_csr_addr; // @[mshrs.scala:402:7] wire [6:0] io_req_bits_uop_rob_idx_0 = io_req_bits_uop_rob_idx; // @[mshrs.scala:402:7] wire [4:0] io_req_bits_uop_ldq_idx_0 = io_req_bits_uop_ldq_idx; // @[mshrs.scala:402:7] wire [4:0] io_req_bits_uop_stq_idx_0 = io_req_bits_uop_stq_idx; // @[mshrs.scala:402:7] wire [1:0] io_req_bits_uop_rxq_idx_0 = io_req_bits_uop_rxq_idx; // @[mshrs.scala:402:7] wire [6:0] io_req_bits_uop_pdst_0 = io_req_bits_uop_pdst; // @[mshrs.scala:402:7] wire [6:0] io_req_bits_uop_prs1_0 = io_req_bits_uop_prs1; // @[mshrs.scala:402:7] wire [6:0] io_req_bits_uop_prs2_0 = io_req_bits_uop_prs2; // @[mshrs.scala:402:7] wire [6:0] io_req_bits_uop_prs3_0 = io_req_bits_uop_prs3; // @[mshrs.scala:402:7] wire [4:0] io_req_bits_uop_ppred_0 = io_req_bits_uop_ppred; // @[mshrs.scala:402:7] wire io_req_bits_uop_prs1_busy_0 = io_req_bits_uop_prs1_busy; // @[mshrs.scala:402:7] wire io_req_bits_uop_prs2_busy_0 = io_req_bits_uop_prs2_busy; // @[mshrs.scala:402:7] wire io_req_bits_uop_prs3_busy_0 = io_req_bits_uop_prs3_busy; // @[mshrs.scala:402:7] wire io_req_bits_uop_ppred_busy_0 = io_req_bits_uop_ppred_busy; // @[mshrs.scala:402:7] wire [6:0] io_req_bits_uop_stale_pdst_0 = io_req_bits_uop_stale_pdst; // @[mshrs.scala:402:7] wire io_req_bits_uop_exception_0 = io_req_bits_uop_exception; // @[mshrs.scala:402:7] wire [63:0] io_req_bits_uop_exc_cause_0 = io_req_bits_uop_exc_cause; // @[mshrs.scala:402:7] wire io_req_bits_uop_bypassable_0 = io_req_bits_uop_bypassable; // @[mshrs.scala:402:7] wire [4:0] io_req_bits_uop_mem_cmd_0 = io_req_bits_uop_mem_cmd; // @[mshrs.scala:402:7] wire [1:0] io_req_bits_uop_mem_size_0 = io_req_bits_uop_mem_size; // @[mshrs.scala:402:7] wire io_req_bits_uop_mem_signed_0 = io_req_bits_uop_mem_signed; // @[mshrs.scala:402:7] wire io_req_bits_uop_is_fence_0 = io_req_bits_uop_is_fence; // @[mshrs.scala:402:7] wire io_req_bits_uop_is_fencei_0 = io_req_bits_uop_is_fencei; // @[mshrs.scala:402:7] wire io_req_bits_uop_is_amo_0 = io_req_bits_uop_is_amo; // @[mshrs.scala:402:7] wire io_req_bits_uop_uses_ldq_0 = io_req_bits_uop_uses_ldq; // @[mshrs.scala:402:7] wire io_req_bits_uop_uses_stq_0 = io_req_bits_uop_uses_stq; // @[mshrs.scala:402:7] wire io_req_bits_uop_is_sys_pc2epc_0 = io_req_bits_uop_is_sys_pc2epc; // @[mshrs.scala:402:7] wire io_req_bits_uop_is_unique_0 = io_req_bits_uop_is_unique; // @[mshrs.scala:402:7] wire io_req_bits_uop_flush_on_commit_0 = io_req_bits_uop_flush_on_commit; // @[mshrs.scala:402:7] wire io_req_bits_uop_ldst_is_rs1_0 = io_req_bits_uop_ldst_is_rs1; // @[mshrs.scala:402:7] wire [5:0] io_req_bits_uop_ldst_0 = io_req_bits_uop_ldst; // @[mshrs.scala:402:7] wire [5:0] io_req_bits_uop_lrs1_0 = io_req_bits_uop_lrs1; // @[mshrs.scala:402:7] wire [5:0] io_req_bits_uop_lrs2_0 = io_req_bits_uop_lrs2; // @[mshrs.scala:402:7] wire [5:0] io_req_bits_uop_lrs3_0 = io_req_bits_uop_lrs3; // @[mshrs.scala:402:7] wire io_req_bits_uop_ldst_val_0 = io_req_bits_uop_ldst_val; // @[mshrs.scala:402:7] wire [1:0] io_req_bits_uop_dst_rtype_0 = io_req_bits_uop_dst_rtype; // @[mshrs.scala:402:7] wire [1:0] io_req_bits_uop_lrs1_rtype_0 = io_req_bits_uop_lrs1_rtype; // @[mshrs.scala:402:7] wire [1:0] io_req_bits_uop_lrs2_rtype_0 = io_req_bits_uop_lrs2_rtype; // @[mshrs.scala:402:7] wire io_req_bits_uop_frs3_en_0 = io_req_bits_uop_frs3_en; // @[mshrs.scala:402:7] wire io_req_bits_uop_fp_val_0 = io_req_bits_uop_fp_val; // @[mshrs.scala:402:7] wire io_req_bits_uop_fp_single_0 = 
io_req_bits_uop_fp_single; // @[mshrs.scala:402:7] wire io_req_bits_uop_xcpt_pf_if_0 = io_req_bits_uop_xcpt_pf_if; // @[mshrs.scala:402:7] wire io_req_bits_uop_xcpt_ae_if_0 = io_req_bits_uop_xcpt_ae_if; // @[mshrs.scala:402:7] wire io_req_bits_uop_xcpt_ma_if_0 = io_req_bits_uop_xcpt_ma_if; // @[mshrs.scala:402:7] wire io_req_bits_uop_bp_debug_if_0 = io_req_bits_uop_bp_debug_if; // @[mshrs.scala:402:7] wire io_req_bits_uop_bp_xcpt_if_0 = io_req_bits_uop_bp_xcpt_if; // @[mshrs.scala:402:7] wire [1:0] io_req_bits_uop_debug_fsrc_0 = io_req_bits_uop_debug_fsrc; // @[mshrs.scala:402:7] wire [1:0] io_req_bits_uop_debug_tsrc_0 = io_req_bits_uop_debug_tsrc; // @[mshrs.scala:402:7] wire [39:0] io_req_bits_addr_0 = io_req_bits_addr; // @[mshrs.scala:402:7] wire [63:0] io_req_bits_data_0 = io_req_bits_data; // @[mshrs.scala:402:7] wire io_req_bits_is_hella_0 = io_req_bits_is_hella; // @[mshrs.scala:402:7] wire io_resp_ready_0 = io_resp_ready; // @[mshrs.scala:402:7] wire io_mem_access_ready_0 = io_mem_access_ready; // @[mshrs.scala:402:7] wire io_mem_ack_valid_0 = io_mem_ack_valid; // @[mshrs.scala:402:7] wire [2:0] io_mem_ack_bits_opcode_0 = io_mem_ack_bits_opcode; // @[mshrs.scala:402:7] wire [1:0] io_mem_ack_bits_param_0 = io_mem_ack_bits_param; // @[mshrs.scala:402:7] wire [3:0] io_mem_ack_bits_size_0 = io_mem_ack_bits_size; // @[mshrs.scala:402:7] wire [2:0] io_mem_ack_bits_source_0 = io_mem_ack_bits_source; // @[mshrs.scala:402:7] wire [3:0] io_mem_ack_bits_sink_0 = io_mem_ack_bits_sink; // @[mshrs.scala:402:7] wire io_mem_ack_bits_denied_0 = io_mem_ack_bits_denied; // @[mshrs.scala:402:7] wire [127:0] io_mem_ack_bits_data_0 = io_mem_ack_bits_data; // @[mshrs.scala:402:7] wire io_mem_ack_bits_corrupt_0 = io_mem_ack_bits_corrupt; // @[mshrs.scala:402:7] wire io_mem_access_bits_corrupt = 1'h0; // @[mshrs.scala:402:7] wire get_corrupt = 1'h0; // @[Edges.scala:460:17] wire get_a_mask_sub_sub_sub_sub_0_1 = 1'h0; // @[Misc.scala:206:21] wire _put_legal_T_62 = 1'h0; // @[Parameters.scala:684:29] wire _put_legal_T_68 = 1'h0; // @[Parameters.scala:684:54] wire put_corrupt = 1'h0; // @[Edges.scala:480:17] wire put_a_mask_sub_sub_sub_sub_0_1 = 1'h0; // @[Misc.scala:206:21] wire _atomics_WIRE_corrupt = 1'h0; // @[mshrs.scala:439:46] wire _atomics_legal_T_34 = 1'h0; // @[Parameters.scala:684:29] wire _atomics_legal_T_40 = 1'h0; // @[Parameters.scala:684:54] wire atomics_a_corrupt = 1'h0; // @[Edges.scala:534:17] wire atomics_a_mask_sub_sub_sub_sub_0_1 = 1'h0; // @[Misc.scala:206:21] wire _atomics_legal_T_93 = 1'h0; // @[Parameters.scala:684:29] wire _atomics_legal_T_99 = 1'h0; // @[Parameters.scala:684:54] wire atomics_a_1_corrupt = 1'h0; // @[Edges.scala:534:17] wire atomics_a_mask_sub_sub_sub_sub_0_1_1 = 1'h0; // @[Misc.scala:206:21] wire _atomics_legal_T_152 = 1'h0; // @[Parameters.scala:684:29] wire _atomics_legal_T_158 = 1'h0; // @[Parameters.scala:684:54] wire atomics_a_2_corrupt = 1'h0; // @[Edges.scala:534:17] wire atomics_a_mask_sub_sub_sub_sub_0_1_2 = 1'h0; // @[Misc.scala:206:21] wire _atomics_legal_T_211 = 1'h0; // @[Parameters.scala:684:29] wire _atomics_legal_T_217 = 1'h0; // @[Parameters.scala:684:54] wire atomics_a_3_corrupt = 1'h0; // @[Edges.scala:534:17] wire atomics_a_mask_sub_sub_sub_sub_0_1_3 = 1'h0; // @[Misc.scala:206:21] wire _atomics_legal_T_270 = 1'h0; // @[Parameters.scala:684:29] wire _atomics_legal_T_276 = 1'h0; // @[Parameters.scala:684:54] wire atomics_a_4_corrupt = 1'h0; // @[Edges.scala:517:17] wire atomics_a_mask_sub_sub_sub_sub_0_1_4 = 1'h0; // @[Misc.scala:206:21] wire 
_atomics_legal_T_329 = 1'h0; // @[Parameters.scala:684:29] wire _atomics_legal_T_335 = 1'h0; // @[Parameters.scala:684:54] wire atomics_a_5_corrupt = 1'h0; // @[Edges.scala:517:17] wire atomics_a_mask_sub_sub_sub_sub_0_1_5 = 1'h0; // @[Misc.scala:206:21] wire _atomics_legal_T_388 = 1'h0; // @[Parameters.scala:684:29] wire _atomics_legal_T_394 = 1'h0; // @[Parameters.scala:684:54] wire atomics_a_6_corrupt = 1'h0; // @[Edges.scala:517:17] wire atomics_a_mask_sub_sub_sub_sub_0_1_6 = 1'h0; // @[Misc.scala:206:21] wire _atomics_legal_T_447 = 1'h0; // @[Parameters.scala:684:29] wire _atomics_legal_T_453 = 1'h0; // @[Parameters.scala:684:54] wire atomics_a_7_corrupt = 1'h0; // @[Edges.scala:517:17] wire atomics_a_mask_sub_sub_sub_sub_0_1_7 = 1'h0; // @[Misc.scala:206:21] wire _atomics_legal_T_506 = 1'h0; // @[Parameters.scala:684:29] wire _atomics_legal_T_512 = 1'h0; // @[Parameters.scala:684:54] wire atomics_a_8_corrupt = 1'h0; // @[Edges.scala:517:17] wire atomics_a_mask_sub_sub_sub_sub_0_1_8 = 1'h0; // @[Misc.scala:206:21] wire _atomics_T_1_corrupt = 1'h0; // @[mshrs.scala:439:75] wire _atomics_T_3_corrupt = 1'h0; // @[mshrs.scala:439:75] wire _atomics_T_5_corrupt = 1'h0; // @[mshrs.scala:439:75] wire _atomics_T_7_corrupt = 1'h0; // @[mshrs.scala:439:75] wire _atomics_T_9_corrupt = 1'h0; // @[mshrs.scala:439:75] wire _atomics_T_11_corrupt = 1'h0; // @[mshrs.scala:439:75] wire _atomics_T_13_corrupt = 1'h0; // @[mshrs.scala:439:75] wire _atomics_T_15_corrupt = 1'h0; // @[mshrs.scala:439:75] wire atomics_corrupt = 1'h0; // @[mshrs.scala:439:75] wire _io_mem_access_bits_T_42_corrupt = 1'h0; // @[mshrs.scala:457:66] wire _io_mem_access_bits_T_43_corrupt = 1'h0; // @[mshrs.scala:457:29] wire io_resp_bits_data_doZero = 1'h0; // @[AMOALU.scala:43:31] wire io_resp_bits_data_doZero_1 = 1'h0; // @[AMOALU.scala:43:31] wire io_resp_bits_data_doZero_2 = 1'h0; // @[AMOALU.scala:43:31] wire [2:0] get_source = 3'h5; // @[Edges.scala:460:17] wire [2:0] put_source = 3'h5; // @[Edges.scala:480:17] wire [2:0] atomics_a_source = 3'h5; // @[Edges.scala:534:17] wire [2:0] atomics_a_1_source = 3'h5; // @[Edges.scala:534:17] wire [2:0] atomics_a_2_source = 3'h5; // @[Edges.scala:534:17] wire [2:0] atomics_a_3_source = 3'h5; // @[Edges.scala:534:17] wire [2:0] atomics_a_4_source = 3'h5; // @[Edges.scala:517:17] wire [2:0] atomics_a_5_source = 3'h5; // @[Edges.scala:517:17] wire [2:0] atomics_a_6_source = 3'h5; // @[Edges.scala:517:17] wire [2:0] atomics_a_7_source = 3'h5; // @[Edges.scala:517:17] wire [2:0] atomics_a_8_source = 3'h5; // @[Edges.scala:517:17] wire [2:0] _io_mem_access_bits_T_42_source = 3'h5; // @[mshrs.scala:457:66] wire [2:0] get_param = 3'h0; // @[Edges.scala:460:17] wire [2:0] put_opcode = 3'h0; // @[Edges.scala:480:17] wire [2:0] put_param = 3'h0; // @[Edges.scala:480:17] wire [2:0] _atomics_WIRE_opcode = 3'h0; // @[mshrs.scala:439:46] wire [2:0] _atomics_WIRE_param = 3'h0; // @[mshrs.scala:439:46] wire [2:0] _atomics_WIRE_source = 3'h0; // @[mshrs.scala:439:46] wire [2:0] atomics_a_1_param = 3'h0; // @[Edges.scala:534:17] wire [2:0] atomics_a_5_param = 3'h0; // @[Edges.scala:517:17] wire [2:0] _io_mem_access_bits_T_42_param = 3'h0; // @[mshrs.scala:457:66] wire [2:0] atomics_a_opcode = 3'h3; // @[Edges.scala:534:17] wire [2:0] atomics_a_param = 3'h3; // @[Edges.scala:534:17] wire [2:0] atomics_a_1_opcode = 3'h3; // @[Edges.scala:534:17] wire [2:0] atomics_a_2_opcode = 3'h3; // @[Edges.scala:534:17] wire [2:0] atomics_a_3_opcode = 3'h3; // @[Edges.scala:534:17] wire [2:0] atomics_a_8_param = 3'h3; 
// @[Edges.scala:517:17] wire [2:0] atomics_a_3_param = 3'h2; // @[Edges.scala:534:17] wire [2:0] atomics_a_4_opcode = 3'h2; // @[Edges.scala:517:17] wire [2:0] atomics_a_5_opcode = 3'h2; // @[Edges.scala:517:17] wire [2:0] atomics_a_6_opcode = 3'h2; // @[Edges.scala:517:17] wire [2:0] atomics_a_7_opcode = 3'h2; // @[Edges.scala:517:17] wire [2:0] atomics_a_7_param = 3'h2; // @[Edges.scala:517:17] wire [2:0] atomics_a_8_opcode = 3'h2; // @[Edges.scala:517:17] wire _get_legal_T = 1'h1; // @[Parameters.scala:92:28] wire _get_legal_T_1 = 1'h1; // @[Parameters.scala:92:38] wire _get_legal_T_2 = 1'h1; // @[Parameters.scala:92:33] wire _get_legal_T_3 = 1'h1; // @[Parameters.scala:684:29] wire _get_legal_T_10 = 1'h1; // @[Parameters.scala:92:28] wire _get_legal_T_11 = 1'h1; // @[Parameters.scala:92:38] wire _get_legal_T_12 = 1'h1; // @[Parameters.scala:92:33] wire _get_legal_T_13 = 1'h1; // @[Parameters.scala:684:29] wire _put_legal_T = 1'h1; // @[Parameters.scala:92:28] wire _put_legal_T_1 = 1'h1; // @[Parameters.scala:92:38] wire _put_legal_T_2 = 1'h1; // @[Parameters.scala:92:33] wire _put_legal_T_3 = 1'h1; // @[Parameters.scala:684:29] wire _put_legal_T_10 = 1'h1; // @[Parameters.scala:92:28] wire _put_legal_T_11 = 1'h1; // @[Parameters.scala:92:38] wire _put_legal_T_12 = 1'h1; // @[Parameters.scala:92:33] wire _put_legal_T_13 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_1 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_2 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_3 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_41 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_42 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_43 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_44 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_59 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_60 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_61 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_62 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_100 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_101 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_102 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_103 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_118 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_119 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_120 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_121 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_159 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_160 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_161 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_162 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_177 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_178 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_179 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_180 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_218 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_219 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_220 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_221 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_236 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_237 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_238 = 1'h1; // 
@[Parameters.scala:92:33] wire _atomics_legal_T_239 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_277 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_278 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_279 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_280 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_295 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_296 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_297 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_298 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_336 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_337 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_338 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_339 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_354 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_355 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_356 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_357 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_395 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_396 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_397 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_398 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_413 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_414 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_415 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_416 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_454 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_455 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_456 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_457 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_472 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_473 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_474 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_475 = 1'h1; // @[Parameters.scala:684:29] wire _atomics_legal_T_513 = 1'h1; // @[Parameters.scala:92:28] wire _atomics_legal_T_514 = 1'h1; // @[Parameters.scala:92:38] wire _atomics_legal_T_515 = 1'h1; // @[Parameters.scala:92:33] wire _atomics_legal_T_516 = 1'h1; // @[Parameters.scala:684:29] wire [2:0] atomics_a_2_param = 3'h1; // @[Edges.scala:534:17] wire [2:0] atomics_a_6_param = 3'h1; // @[Edges.scala:517:17] wire [2:0] get_opcode = 3'h4; // @[Edges.scala:460:17] wire [2:0] atomics_a_4_param = 3'h4; // @[Edges.scala:517:17] wire [127:0] get_data = 128'h0; // @[Edges.scala:460:17] wire [127:0] _atomics_WIRE_data = 128'h0; // @[mshrs.scala:439:46] wire [15:0] _atomics_WIRE_mask = 16'h0; // @[mshrs.scala:439:46] wire [31:0] _atomics_WIRE_address = 32'h0; // @[mshrs.scala:439:46] wire [3:0] _atomics_WIRE_size = 4'h0; // @[mshrs.scala:439:46] wire _io_req_ready_T; // @[mshrs.scala:427:25] wire _io_resp_valid_T_1; // @[mshrs.scala:461:43] wire [63:0] _io_resp_bits_data_T_23; // @[AMOALU.scala:45:16] wire _io_mem_access_valid_T; // @[mshrs.scala:456:32] wire [2:0] _io_mem_access_bits_T_43_opcode; // @[mshrs.scala:457:29] wire [2:0] _io_mem_access_bits_T_43_param; // @[mshrs.scala:457:29] wire [3:0] _io_mem_access_bits_T_43_size; // @[mshrs.scala:457:29] wire [2:0] _io_mem_access_bits_T_43_source; // @[mshrs.scala:457:29] wire [31:0] _io_mem_access_bits_T_43_address; // @[mshrs.scala:457:29] wire [15:0] _io_mem_access_bits_T_43_mask; // @[mshrs.scala:457:29] 
wire [127:0] _io_mem_access_bits_T_43_data; // @[mshrs.scala:457:29] wire io_req_ready_0; // @[mshrs.scala:402:7] wire [3:0] io_resp_bits_uop_ctrl_br_type_0; // @[mshrs.scala:402:7] wire [1:0] io_resp_bits_uop_ctrl_op1_sel_0; // @[mshrs.scala:402:7] wire [2:0] io_resp_bits_uop_ctrl_op2_sel_0; // @[mshrs.scala:402:7] wire [2:0] io_resp_bits_uop_ctrl_imm_sel_0; // @[mshrs.scala:402:7] wire [4:0] io_resp_bits_uop_ctrl_op_fcn_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_ctrl_fcn_dw_0; // @[mshrs.scala:402:7] wire [2:0] io_resp_bits_uop_ctrl_csr_cmd_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_ctrl_is_load_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_ctrl_is_sta_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_ctrl_is_std_0; // @[mshrs.scala:402:7] wire [6:0] io_resp_bits_uop_uopc_0; // @[mshrs.scala:402:7] wire [31:0] io_resp_bits_uop_inst_0; // @[mshrs.scala:402:7] wire [31:0] io_resp_bits_uop_debug_inst_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_is_rvc_0; // @[mshrs.scala:402:7] wire [39:0] io_resp_bits_uop_debug_pc_0; // @[mshrs.scala:402:7] wire [2:0] io_resp_bits_uop_iq_type_0; // @[mshrs.scala:402:7] wire [9:0] io_resp_bits_uop_fu_code_0; // @[mshrs.scala:402:7] wire [1:0] io_resp_bits_uop_iw_state_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_iw_p1_poisoned_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_iw_p2_poisoned_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_is_br_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_is_jalr_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_is_jal_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_is_sfb_0; // @[mshrs.scala:402:7] wire [15:0] io_resp_bits_uop_br_mask_0; // @[mshrs.scala:402:7] wire [3:0] io_resp_bits_uop_br_tag_0; // @[mshrs.scala:402:7] wire [4:0] io_resp_bits_uop_ftq_idx_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_edge_inst_0; // @[mshrs.scala:402:7] wire [5:0] io_resp_bits_uop_pc_lob_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_taken_0; // @[mshrs.scala:402:7] wire [19:0] io_resp_bits_uop_imm_packed_0; // @[mshrs.scala:402:7] wire [11:0] io_resp_bits_uop_csr_addr_0; // @[mshrs.scala:402:7] wire [6:0] io_resp_bits_uop_rob_idx_0; // @[mshrs.scala:402:7] wire [4:0] io_resp_bits_uop_ldq_idx_0; // @[mshrs.scala:402:7] wire [4:0] io_resp_bits_uop_stq_idx_0; // @[mshrs.scala:402:7] wire [1:0] io_resp_bits_uop_rxq_idx_0; // @[mshrs.scala:402:7] wire [6:0] io_resp_bits_uop_pdst_0; // @[mshrs.scala:402:7] wire [6:0] io_resp_bits_uop_prs1_0; // @[mshrs.scala:402:7] wire [6:0] io_resp_bits_uop_prs2_0; // @[mshrs.scala:402:7] wire [6:0] io_resp_bits_uop_prs3_0; // @[mshrs.scala:402:7] wire [4:0] io_resp_bits_uop_ppred_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_prs1_busy_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_prs2_busy_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_prs3_busy_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_ppred_busy_0; // @[mshrs.scala:402:7] wire [6:0] io_resp_bits_uop_stale_pdst_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_exception_0; // @[mshrs.scala:402:7] wire [63:0] io_resp_bits_uop_exc_cause_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_bypassable_0; // @[mshrs.scala:402:7] wire [4:0] io_resp_bits_uop_mem_cmd_0; // @[mshrs.scala:402:7] wire [1:0] io_resp_bits_uop_mem_size_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_mem_signed_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_is_fence_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_is_fencei_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_is_amo_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_uses_ldq_0; // 
@[mshrs.scala:402:7] wire io_resp_bits_uop_uses_stq_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_is_sys_pc2epc_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_is_unique_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_flush_on_commit_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_ldst_is_rs1_0; // @[mshrs.scala:402:7] wire [5:0] io_resp_bits_uop_ldst_0; // @[mshrs.scala:402:7] wire [5:0] io_resp_bits_uop_lrs1_0; // @[mshrs.scala:402:7] wire [5:0] io_resp_bits_uop_lrs2_0; // @[mshrs.scala:402:7] wire [5:0] io_resp_bits_uop_lrs3_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_ldst_val_0; // @[mshrs.scala:402:7] wire [1:0] io_resp_bits_uop_dst_rtype_0; // @[mshrs.scala:402:7] wire [1:0] io_resp_bits_uop_lrs1_rtype_0; // @[mshrs.scala:402:7] wire [1:0] io_resp_bits_uop_lrs2_rtype_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_frs3_en_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_fp_val_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_fp_single_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_xcpt_pf_if_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_xcpt_ae_if_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_xcpt_ma_if_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_bp_debug_if_0; // @[mshrs.scala:402:7] wire io_resp_bits_uop_bp_xcpt_if_0; // @[mshrs.scala:402:7] wire [1:0] io_resp_bits_uop_debug_fsrc_0; // @[mshrs.scala:402:7] wire [1:0] io_resp_bits_uop_debug_tsrc_0; // @[mshrs.scala:402:7] wire [63:0] io_resp_bits_data_0; // @[mshrs.scala:402:7] wire io_resp_bits_is_hella_0; // @[mshrs.scala:402:7] wire io_resp_valid_0; // @[mshrs.scala:402:7] wire [2:0] io_mem_access_bits_opcode_0; // @[mshrs.scala:402:7] wire [2:0] io_mem_access_bits_param_0; // @[mshrs.scala:402:7] wire [3:0] io_mem_access_bits_size_0; // @[mshrs.scala:402:7] wire [2:0] io_mem_access_bits_source_0; // @[mshrs.scala:402:7] wire [31:0] io_mem_access_bits_address_0; // @[mshrs.scala:402:7] wire [15:0] io_mem_access_bits_mask_0; // @[mshrs.scala:402:7] wire [127:0] io_mem_access_bits_data_0; // @[mshrs.scala:402:7] wire io_mem_access_valid_0; // @[mshrs.scala:402:7] reg [6:0] req_uop_uopc; // @[mshrs.scala:421:16] assign io_resp_bits_uop_uopc_0 = req_uop_uopc; // @[mshrs.scala:402:7, :421:16] reg [31:0] req_uop_inst; // @[mshrs.scala:421:16] assign io_resp_bits_uop_inst_0 = req_uop_inst; // @[mshrs.scala:402:7, :421:16] reg [31:0] req_uop_debug_inst; // @[mshrs.scala:421:16] assign io_resp_bits_uop_debug_inst_0 = req_uop_debug_inst; // @[mshrs.scala:402:7, :421:16] reg req_uop_is_rvc; // @[mshrs.scala:421:16] assign io_resp_bits_uop_is_rvc_0 = req_uop_is_rvc; // @[mshrs.scala:402:7, :421:16] reg [39:0] req_uop_debug_pc; // @[mshrs.scala:421:16] assign io_resp_bits_uop_debug_pc_0 = req_uop_debug_pc; // @[mshrs.scala:402:7, :421:16] reg [2:0] req_uop_iq_type; // @[mshrs.scala:421:16] assign io_resp_bits_uop_iq_type_0 = req_uop_iq_type; // @[mshrs.scala:402:7, :421:16] reg [9:0] req_uop_fu_code; // @[mshrs.scala:421:16] assign io_resp_bits_uop_fu_code_0 = req_uop_fu_code; // @[mshrs.scala:402:7, :421:16] reg [3:0] req_uop_ctrl_br_type; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ctrl_br_type_0 = req_uop_ctrl_br_type; // @[mshrs.scala:402:7, :421:16] reg [1:0] req_uop_ctrl_op1_sel; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ctrl_op1_sel_0 = req_uop_ctrl_op1_sel; // @[mshrs.scala:402:7, :421:16] reg [2:0] req_uop_ctrl_op2_sel; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ctrl_op2_sel_0 = req_uop_ctrl_op2_sel; // @[mshrs.scala:402:7, :421:16] reg [2:0] req_uop_ctrl_imm_sel; // @[mshrs.scala:421:16] 
assign io_resp_bits_uop_ctrl_imm_sel_0 = req_uop_ctrl_imm_sel; // @[mshrs.scala:402:7, :421:16] reg [4:0] req_uop_ctrl_op_fcn; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ctrl_op_fcn_0 = req_uop_ctrl_op_fcn; // @[mshrs.scala:402:7, :421:16] reg req_uop_ctrl_fcn_dw; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ctrl_fcn_dw_0 = req_uop_ctrl_fcn_dw; // @[mshrs.scala:402:7, :421:16] reg [2:0] req_uop_ctrl_csr_cmd; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ctrl_csr_cmd_0 = req_uop_ctrl_csr_cmd; // @[mshrs.scala:402:7, :421:16] reg req_uop_ctrl_is_load; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ctrl_is_load_0 = req_uop_ctrl_is_load; // @[mshrs.scala:402:7, :421:16] reg req_uop_ctrl_is_sta; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ctrl_is_sta_0 = req_uop_ctrl_is_sta; // @[mshrs.scala:402:7, :421:16] reg req_uop_ctrl_is_std; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ctrl_is_std_0 = req_uop_ctrl_is_std; // @[mshrs.scala:402:7, :421:16] reg [1:0] req_uop_iw_state; // @[mshrs.scala:421:16] assign io_resp_bits_uop_iw_state_0 = req_uop_iw_state; // @[mshrs.scala:402:7, :421:16] reg req_uop_iw_p1_poisoned; // @[mshrs.scala:421:16] assign io_resp_bits_uop_iw_p1_poisoned_0 = req_uop_iw_p1_poisoned; // @[mshrs.scala:402:7, :421:16] reg req_uop_iw_p2_poisoned; // @[mshrs.scala:421:16] assign io_resp_bits_uop_iw_p2_poisoned_0 = req_uop_iw_p2_poisoned; // @[mshrs.scala:402:7, :421:16] reg req_uop_is_br; // @[mshrs.scala:421:16] assign io_resp_bits_uop_is_br_0 = req_uop_is_br; // @[mshrs.scala:402:7, :421:16] reg req_uop_is_jalr; // @[mshrs.scala:421:16] assign io_resp_bits_uop_is_jalr_0 = req_uop_is_jalr; // @[mshrs.scala:402:7, :421:16] reg req_uop_is_jal; // @[mshrs.scala:421:16] assign io_resp_bits_uop_is_jal_0 = req_uop_is_jal; // @[mshrs.scala:402:7, :421:16] reg req_uop_is_sfb; // @[mshrs.scala:421:16] assign io_resp_bits_uop_is_sfb_0 = req_uop_is_sfb; // @[mshrs.scala:402:7, :421:16] reg [15:0] req_uop_br_mask; // @[mshrs.scala:421:16] assign io_resp_bits_uop_br_mask_0 = req_uop_br_mask; // @[mshrs.scala:402:7, :421:16] reg [3:0] req_uop_br_tag; // @[mshrs.scala:421:16] assign io_resp_bits_uop_br_tag_0 = req_uop_br_tag; // @[mshrs.scala:402:7, :421:16] reg [4:0] req_uop_ftq_idx; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ftq_idx_0 = req_uop_ftq_idx; // @[mshrs.scala:402:7, :421:16] reg req_uop_edge_inst; // @[mshrs.scala:421:16] assign io_resp_bits_uop_edge_inst_0 = req_uop_edge_inst; // @[mshrs.scala:402:7, :421:16] reg [5:0] req_uop_pc_lob; // @[mshrs.scala:421:16] assign io_resp_bits_uop_pc_lob_0 = req_uop_pc_lob; // @[mshrs.scala:402:7, :421:16] reg req_uop_taken; // @[mshrs.scala:421:16] assign io_resp_bits_uop_taken_0 = req_uop_taken; // @[mshrs.scala:402:7, :421:16] reg [19:0] req_uop_imm_packed; // @[mshrs.scala:421:16] assign io_resp_bits_uop_imm_packed_0 = req_uop_imm_packed; // @[mshrs.scala:402:7, :421:16] reg [11:0] req_uop_csr_addr; // @[mshrs.scala:421:16] assign io_resp_bits_uop_csr_addr_0 = req_uop_csr_addr; // @[mshrs.scala:402:7, :421:16] reg [6:0] req_uop_rob_idx; // @[mshrs.scala:421:16] assign io_resp_bits_uop_rob_idx_0 = req_uop_rob_idx; // @[mshrs.scala:402:7, :421:16] reg [4:0] req_uop_ldq_idx; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ldq_idx_0 = req_uop_ldq_idx; // @[mshrs.scala:402:7, :421:16] reg [4:0] req_uop_stq_idx; // @[mshrs.scala:421:16] assign io_resp_bits_uop_stq_idx_0 = req_uop_stq_idx; // @[mshrs.scala:402:7, :421:16] reg [1:0] req_uop_rxq_idx; // @[mshrs.scala:421:16] assign io_resp_bits_uop_rxq_idx_0 = 
req_uop_rxq_idx; // @[mshrs.scala:402:7, :421:16] reg [6:0] req_uop_pdst; // @[mshrs.scala:421:16] assign io_resp_bits_uop_pdst_0 = req_uop_pdst; // @[mshrs.scala:402:7, :421:16] reg [6:0] req_uop_prs1; // @[mshrs.scala:421:16] assign io_resp_bits_uop_prs1_0 = req_uop_prs1; // @[mshrs.scala:402:7, :421:16] reg [6:0] req_uop_prs2; // @[mshrs.scala:421:16] assign io_resp_bits_uop_prs2_0 = req_uop_prs2; // @[mshrs.scala:402:7, :421:16] reg [6:0] req_uop_prs3; // @[mshrs.scala:421:16] assign io_resp_bits_uop_prs3_0 = req_uop_prs3; // @[mshrs.scala:402:7, :421:16] reg [4:0] req_uop_ppred; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ppred_0 = req_uop_ppred; // @[mshrs.scala:402:7, :421:16] reg req_uop_prs1_busy; // @[mshrs.scala:421:16] assign io_resp_bits_uop_prs1_busy_0 = req_uop_prs1_busy; // @[mshrs.scala:402:7, :421:16] reg req_uop_prs2_busy; // @[mshrs.scala:421:16] assign io_resp_bits_uop_prs2_busy_0 = req_uop_prs2_busy; // @[mshrs.scala:402:7, :421:16] reg req_uop_prs3_busy; // @[mshrs.scala:421:16] assign io_resp_bits_uop_prs3_busy_0 = req_uop_prs3_busy; // @[mshrs.scala:402:7, :421:16] reg req_uop_ppred_busy; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ppred_busy_0 = req_uop_ppred_busy; // @[mshrs.scala:402:7, :421:16] reg [6:0] req_uop_stale_pdst; // @[mshrs.scala:421:16] assign io_resp_bits_uop_stale_pdst_0 = req_uop_stale_pdst; // @[mshrs.scala:402:7, :421:16] reg req_uop_exception; // @[mshrs.scala:421:16] assign io_resp_bits_uop_exception_0 = req_uop_exception; // @[mshrs.scala:402:7, :421:16] reg [63:0] req_uop_exc_cause; // @[mshrs.scala:421:16] assign io_resp_bits_uop_exc_cause_0 = req_uop_exc_cause; // @[mshrs.scala:402:7, :421:16] reg req_uop_bypassable; // @[mshrs.scala:421:16] assign io_resp_bits_uop_bypassable_0 = req_uop_bypassable; // @[mshrs.scala:402:7, :421:16] reg [4:0] req_uop_mem_cmd; // @[mshrs.scala:421:16] assign io_resp_bits_uop_mem_cmd_0 = req_uop_mem_cmd; // @[mshrs.scala:402:7, :421:16] reg [1:0] req_uop_mem_size; // @[mshrs.scala:421:16] assign io_resp_bits_uop_mem_size_0 = req_uop_mem_size; // @[mshrs.scala:402:7, :421:16] wire [1:0] size = req_uop_mem_size; // @[AMOALU.scala:11:18] reg req_uop_mem_signed; // @[mshrs.scala:421:16] assign io_resp_bits_uop_mem_signed_0 = req_uop_mem_signed; // @[mshrs.scala:402:7, :421:16] reg req_uop_is_fence; // @[mshrs.scala:421:16] assign io_resp_bits_uop_is_fence_0 = req_uop_is_fence; // @[mshrs.scala:402:7, :421:16] reg req_uop_is_fencei; // @[mshrs.scala:421:16] assign io_resp_bits_uop_is_fencei_0 = req_uop_is_fencei; // @[mshrs.scala:402:7, :421:16] reg req_uop_is_amo; // @[mshrs.scala:421:16] assign io_resp_bits_uop_is_amo_0 = req_uop_is_amo; // @[mshrs.scala:402:7, :421:16] reg req_uop_uses_ldq; // @[mshrs.scala:421:16] assign io_resp_bits_uop_uses_ldq_0 = req_uop_uses_ldq; // @[mshrs.scala:402:7, :421:16] reg req_uop_uses_stq; // @[mshrs.scala:421:16] assign io_resp_bits_uop_uses_stq_0 = req_uop_uses_stq; // @[mshrs.scala:402:7, :421:16] reg req_uop_is_sys_pc2epc; // @[mshrs.scala:421:16] assign io_resp_bits_uop_is_sys_pc2epc_0 = req_uop_is_sys_pc2epc; // @[mshrs.scala:402:7, :421:16] reg req_uop_is_unique; // @[mshrs.scala:421:16] assign io_resp_bits_uop_is_unique_0 = req_uop_is_unique; // @[mshrs.scala:402:7, :421:16] reg req_uop_flush_on_commit; // @[mshrs.scala:421:16] assign io_resp_bits_uop_flush_on_commit_0 = req_uop_flush_on_commit; // @[mshrs.scala:402:7, :421:16] reg req_uop_ldst_is_rs1; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ldst_is_rs1_0 = req_uop_ldst_is_rs1; // 
@[mshrs.scala:402:7, :421:16] reg [5:0] req_uop_ldst; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ldst_0 = req_uop_ldst; // @[mshrs.scala:402:7, :421:16] reg [5:0] req_uop_lrs1; // @[mshrs.scala:421:16] assign io_resp_bits_uop_lrs1_0 = req_uop_lrs1; // @[mshrs.scala:402:7, :421:16] reg [5:0] req_uop_lrs2; // @[mshrs.scala:421:16] assign io_resp_bits_uop_lrs2_0 = req_uop_lrs2; // @[mshrs.scala:402:7, :421:16] reg [5:0] req_uop_lrs3; // @[mshrs.scala:421:16] assign io_resp_bits_uop_lrs3_0 = req_uop_lrs3; // @[mshrs.scala:402:7, :421:16] reg req_uop_ldst_val; // @[mshrs.scala:421:16] assign io_resp_bits_uop_ldst_val_0 = req_uop_ldst_val; // @[mshrs.scala:402:7, :421:16] reg [1:0] req_uop_dst_rtype; // @[mshrs.scala:421:16] assign io_resp_bits_uop_dst_rtype_0 = req_uop_dst_rtype; // @[mshrs.scala:402:7, :421:16] reg [1:0] req_uop_lrs1_rtype; // @[mshrs.scala:421:16] assign io_resp_bits_uop_lrs1_rtype_0 = req_uop_lrs1_rtype; // @[mshrs.scala:402:7, :421:16] reg [1:0] req_uop_lrs2_rtype; // @[mshrs.scala:421:16] assign io_resp_bits_uop_lrs2_rtype_0 = req_uop_lrs2_rtype; // @[mshrs.scala:402:7, :421:16] reg req_uop_frs3_en; // @[mshrs.scala:421:16] assign io_resp_bits_uop_frs3_en_0 = req_uop_frs3_en; // @[mshrs.scala:402:7, :421:16] reg req_uop_fp_val; // @[mshrs.scala:421:16] assign io_resp_bits_uop_fp_val_0 = req_uop_fp_val; // @[mshrs.scala:402:7, :421:16] reg req_uop_fp_single; // @[mshrs.scala:421:16] assign io_resp_bits_uop_fp_single_0 = req_uop_fp_single; // @[mshrs.scala:402:7, :421:16] reg req_uop_xcpt_pf_if; // @[mshrs.scala:421:16] assign io_resp_bits_uop_xcpt_pf_if_0 = req_uop_xcpt_pf_if; // @[mshrs.scala:402:7, :421:16] reg req_uop_xcpt_ae_if; // @[mshrs.scala:421:16] assign io_resp_bits_uop_xcpt_ae_if_0 = req_uop_xcpt_ae_if; // @[mshrs.scala:402:7, :421:16] reg req_uop_xcpt_ma_if; // @[mshrs.scala:421:16] assign io_resp_bits_uop_xcpt_ma_if_0 = req_uop_xcpt_ma_if; // @[mshrs.scala:402:7, :421:16] reg req_uop_bp_debug_if; // @[mshrs.scala:421:16] assign io_resp_bits_uop_bp_debug_if_0 = req_uop_bp_debug_if; // @[mshrs.scala:402:7, :421:16] reg req_uop_bp_xcpt_if; // @[mshrs.scala:421:16] assign io_resp_bits_uop_bp_xcpt_if_0 = req_uop_bp_xcpt_if; // @[mshrs.scala:402:7, :421:16] reg [1:0] req_uop_debug_fsrc; // @[mshrs.scala:421:16] assign io_resp_bits_uop_debug_fsrc_0 = req_uop_debug_fsrc; // @[mshrs.scala:402:7, :421:16] reg [1:0] req_uop_debug_tsrc; // @[mshrs.scala:421:16] assign io_resp_bits_uop_debug_tsrc_0 = req_uop_debug_tsrc; // @[mshrs.scala:402:7, :421:16] reg [39:0] req_addr; // @[mshrs.scala:421:16] wire [39:0] _get_legal_T_14 = req_addr; // @[Parameters.scala:137:31] wire [39:0] _put_legal_T_14 = req_addr; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_4 = req_addr; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_63 = req_addr; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_122 = req_addr; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_181 = req_addr; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_240 = req_addr; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_299 = req_addr; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_358 = req_addr; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_417 = req_addr; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_476 = req_addr; // @[Parameters.scala:137:31] reg [63:0] req_data; // @[mshrs.scala:421:16] reg req_is_hella; // @[mshrs.scala:421:16] assign io_resp_bits_is_hella_0 = req_is_hella; // @[mshrs.scala:402:7, :421:16] reg 
[63:0] grant_word; // @[mshrs.scala:422:23] reg [1:0] state; // @[mshrs.scala:426:22] assign _io_req_ready_T = state == 2'h0; // @[mshrs.scala:426:22, :427:25] assign io_req_ready_0 = _io_req_ready_T; // @[mshrs.scala:402:7, :427:25] wire [127:0] a_data = {2{req_data}}; // @[mshrs.scala:421:16, :434:23] wire [127:0] put_data = a_data; // @[Edges.scala:480:17] wire [127:0] atomics_a_data = a_data; // @[Edges.scala:534:17] wire [127:0] atomics_a_1_data = a_data; // @[Edges.scala:534:17] wire [127:0] atomics_a_2_data = a_data; // @[Edges.scala:534:17] wire [127:0] atomics_a_3_data = a_data; // @[Edges.scala:534:17] wire [127:0] atomics_a_4_data = a_data; // @[Edges.scala:517:17] wire [127:0] atomics_a_5_data = a_data; // @[Edges.scala:517:17] wire [127:0] atomics_a_6_data = a_data; // @[Edges.scala:517:17] wire [127:0] atomics_a_7_data = a_data; // @[Edges.scala:517:17] wire [127:0] atomics_a_8_data = a_data; // @[Edges.scala:517:17] wire [39:0] _GEN = {req_addr[39:14], req_addr[13:0] ^ 14'h3000}; // @[Parameters.scala:137:31] wire [39:0] _get_legal_T_4; // @[Parameters.scala:137:31] assign _get_legal_T_4 = _GEN; // @[Parameters.scala:137:31] wire [39:0] _put_legal_T_4; // @[Parameters.scala:137:31] assign _put_legal_T_4 = _GEN; // @[Parameters.scala:137:31] wire [40:0] _get_legal_T_5 = {1'h0, _get_legal_T_4}; // @[Parameters.scala:137:{31,41}] wire [40:0] _get_legal_T_6 = _get_legal_T_5 & 41'h9A013000; // @[Parameters.scala:137:{41,46}] wire [40:0] _get_legal_T_7 = _get_legal_T_6; // @[Parameters.scala:137:46] wire _get_legal_T_8 = _get_legal_T_7 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _get_legal_T_9 = _get_legal_T_8; // @[Parameters.scala:684:54] wire _get_legal_T_62 = _get_legal_T_9; // @[Parameters.scala:684:54, :686:26] wire [40:0] _get_legal_T_15 = {1'h0, _get_legal_T_14}; // @[Parameters.scala:137:{31,41}] wire [40:0] _get_legal_T_16 = _get_legal_T_15 & 41'h9A012000; // @[Parameters.scala:137:{41,46}] wire [40:0] _get_legal_T_17 = _get_legal_T_16; // @[Parameters.scala:137:46] wire _get_legal_T_18 = _get_legal_T_17 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _GEN_0 = {req_addr[39:17], req_addr[16:0] ^ 17'h10000}; // @[Parameters.scala:137:31] wire [39:0] _get_legal_T_19; // @[Parameters.scala:137:31] assign _get_legal_T_19 = _GEN_0; // @[Parameters.scala:137:31] wire [39:0] _get_legal_T_24; // @[Parameters.scala:137:31] assign _get_legal_T_24 = _GEN_0; // @[Parameters.scala:137:31] wire [39:0] _put_legal_T_63; // @[Parameters.scala:137:31] assign _put_legal_T_63 = _GEN_0; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_35; // @[Parameters.scala:137:31] assign _atomics_legal_T_35 = _GEN_0; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_94; // @[Parameters.scala:137:31] assign _atomics_legal_T_94 = _GEN_0; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_153; // @[Parameters.scala:137:31] assign _atomics_legal_T_153 = _GEN_0; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_212; // @[Parameters.scala:137:31] assign _atomics_legal_T_212 = _GEN_0; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_271; // @[Parameters.scala:137:31] assign _atomics_legal_T_271 = _GEN_0; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_330; // @[Parameters.scala:137:31] assign _atomics_legal_T_330 = _GEN_0; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_389; // @[Parameters.scala:137:31] assign _atomics_legal_T_389 = _GEN_0; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_448; // 
@[Parameters.scala:137:31] assign _atomics_legal_T_448 = _GEN_0; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_507; // @[Parameters.scala:137:31] assign _atomics_legal_T_507 = _GEN_0; // @[Parameters.scala:137:31] wire [40:0] _get_legal_T_20 = {1'h0, _get_legal_T_19}; // @[Parameters.scala:137:{31,41}] wire [40:0] _get_legal_T_21 = _get_legal_T_20 & 41'h98013000; // @[Parameters.scala:137:{41,46}] wire [40:0] _get_legal_T_22 = _get_legal_T_21; // @[Parameters.scala:137:46] wire _get_legal_T_23 = _get_legal_T_22 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _get_legal_T_25 = {1'h0, _get_legal_T_24}; // @[Parameters.scala:137:{31,41}] wire [40:0] _get_legal_T_26 = _get_legal_T_25 & 41'h9A010000; // @[Parameters.scala:137:{41,46}] wire [40:0] _get_legal_T_27 = _get_legal_T_26; // @[Parameters.scala:137:46] wire _get_legal_T_28 = _get_legal_T_27 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _GEN_1 = {req_addr[39:26], req_addr[25:0] ^ 26'h2000000}; // @[Parameters.scala:137:31] wire [39:0] _get_legal_T_29; // @[Parameters.scala:137:31] assign _get_legal_T_29 = _GEN_1; // @[Parameters.scala:137:31] wire [39:0] _put_legal_T_24; // @[Parameters.scala:137:31] assign _put_legal_T_24 = _GEN_1; // @[Parameters.scala:137:31] wire [40:0] _get_legal_T_30 = {1'h0, _get_legal_T_29}; // @[Parameters.scala:137:{31,41}] wire [40:0] _get_legal_T_31 = _get_legal_T_30 & 41'h9A010000; // @[Parameters.scala:137:{41,46}] wire [40:0] _get_legal_T_32 = _get_legal_T_31; // @[Parameters.scala:137:46] wire _get_legal_T_33 = _get_legal_T_32 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _GEN_2 = {req_addr[39:28], req_addr[27:0] ^ 28'h8000000}; // @[Parameters.scala:137:31] wire [39:0] _get_legal_T_34; // @[Parameters.scala:137:31] assign _get_legal_T_34 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _get_legal_T_39; // @[Parameters.scala:137:31] assign _get_legal_T_39 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _put_legal_T_34; // @[Parameters.scala:137:31] assign _put_legal_T_34 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _put_legal_T_39; // @[Parameters.scala:137:31] assign _put_legal_T_39 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_45; // @[Parameters.scala:137:31] assign _atomics_legal_T_45 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_104; // @[Parameters.scala:137:31] assign _atomics_legal_T_104 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_163; // @[Parameters.scala:137:31] assign _atomics_legal_T_163 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_222; // @[Parameters.scala:137:31] assign _atomics_legal_T_222 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_281; // @[Parameters.scala:137:31] assign _atomics_legal_T_281 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_340; // @[Parameters.scala:137:31] assign _atomics_legal_T_340 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_399; // @[Parameters.scala:137:31] assign _atomics_legal_T_399 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_458; // @[Parameters.scala:137:31] assign _atomics_legal_T_458 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_517; // @[Parameters.scala:137:31] assign _atomics_legal_T_517 = _GEN_2; // @[Parameters.scala:137:31] wire [40:0] _get_legal_T_35 = {1'h0, _get_legal_T_34}; // @[Parameters.scala:137:{31,41}] wire [40:0] _get_legal_T_36 = _get_legal_T_35 & 41'h98000000; // 
@[Parameters.scala:137:{41,46}] wire [40:0] _get_legal_T_37 = _get_legal_T_36; // @[Parameters.scala:137:46] wire _get_legal_T_38 = _get_legal_T_37 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _get_legal_T_40 = {1'h0, _get_legal_T_39}; // @[Parameters.scala:137:{31,41}] wire [40:0] _get_legal_T_41 = _get_legal_T_40 & 41'h9A010000; // @[Parameters.scala:137:{41,46}] wire [40:0] _get_legal_T_42 = _get_legal_T_41; // @[Parameters.scala:137:46] wire _get_legal_T_43 = _get_legal_T_42 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _GEN_3 = {req_addr[39:29], req_addr[28:0] ^ 29'h10000000}; // @[Parameters.scala:137:31] wire [39:0] _get_legal_T_44; // @[Parameters.scala:137:31] assign _get_legal_T_44 = _GEN_3; // @[Parameters.scala:137:31] wire [39:0] _put_legal_T_44; // @[Parameters.scala:137:31] assign _put_legal_T_44 = _GEN_3; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_24; // @[Parameters.scala:137:31] assign _atomics_legal_T_24 = _GEN_3; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_83; // @[Parameters.scala:137:31] assign _atomics_legal_T_83 = _GEN_3; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_142; // @[Parameters.scala:137:31] assign _atomics_legal_T_142 = _GEN_3; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_201; // @[Parameters.scala:137:31] assign _atomics_legal_T_201 = _GEN_3; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_260; // @[Parameters.scala:137:31] assign _atomics_legal_T_260 = _GEN_3; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_319; // @[Parameters.scala:137:31] assign _atomics_legal_T_319 = _GEN_3; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_378; // @[Parameters.scala:137:31] assign _atomics_legal_T_378 = _GEN_3; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_437; // @[Parameters.scala:137:31] assign _atomics_legal_T_437 = _GEN_3; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_496; // @[Parameters.scala:137:31] assign _atomics_legal_T_496 = _GEN_3; // @[Parameters.scala:137:31] wire [40:0] _get_legal_T_45 = {1'h0, _get_legal_T_44}; // @[Parameters.scala:137:{31,41}] wire [40:0] _get_legal_T_46 = _get_legal_T_45 & 41'h9A013000; // @[Parameters.scala:137:{41,46}] wire [40:0] _get_legal_T_47 = _get_legal_T_46; // @[Parameters.scala:137:46] wire _get_legal_T_48 = _get_legal_T_47 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] get_address = req_addr[31:0]; // @[Edges.scala:460:17] wire [31:0] put_address = req_addr[31:0]; // @[Edges.scala:480:17] wire [31:0] atomics_a_address = req_addr[31:0]; // @[Edges.scala:534:17] wire [31:0] atomics_a_1_address = req_addr[31:0]; // @[Edges.scala:534:17] wire [31:0] atomics_a_2_address = req_addr[31:0]; // @[Edges.scala:534:17] wire [31:0] atomics_a_3_address = req_addr[31:0]; // @[Edges.scala:534:17] wire [31:0] atomics_a_4_address = req_addr[31:0]; // @[Edges.scala:517:17] wire [31:0] atomics_a_5_address = req_addr[31:0]; // @[Edges.scala:517:17] wire [31:0] atomics_a_6_address = req_addr[31:0]; // @[Edges.scala:517:17] wire [31:0] atomics_a_7_address = req_addr[31:0]; // @[Edges.scala:517:17] wire [31:0] atomics_a_8_address = req_addr[31:0]; // @[Edges.scala:517:17] wire [39:0] _GEN_4 = {req_addr[39:32], req_addr[31:0] ^ 32'h80000000}; // @[Parameters.scala:137:31] wire [39:0] _get_legal_T_49; // @[Parameters.scala:137:31] assign _get_legal_T_49 = _GEN_4; // @[Parameters.scala:137:31] wire [39:0] _put_legal_T_49; // @[Parameters.scala:137:31] assign _put_legal_T_49 = _GEN_4; // 
@[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_50; // @[Parameters.scala:137:31] assign _atomics_legal_T_50 = _GEN_4; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_109; // @[Parameters.scala:137:31] assign _atomics_legal_T_109 = _GEN_4; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_168; // @[Parameters.scala:137:31] assign _atomics_legal_T_168 = _GEN_4; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_227; // @[Parameters.scala:137:31] assign _atomics_legal_T_227 = _GEN_4; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_286; // @[Parameters.scala:137:31] assign _atomics_legal_T_286 = _GEN_4; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_345; // @[Parameters.scala:137:31] assign _atomics_legal_T_345 = _GEN_4; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_404; // @[Parameters.scala:137:31] assign _atomics_legal_T_404 = _GEN_4; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_463; // @[Parameters.scala:137:31] assign _atomics_legal_T_463 = _GEN_4; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_522; // @[Parameters.scala:137:31] assign _atomics_legal_T_522 = _GEN_4; // @[Parameters.scala:137:31] wire [40:0] _get_legal_T_50 = {1'h0, _get_legal_T_49}; // @[Parameters.scala:137:{31,41}] wire [40:0] _get_legal_T_51 = _get_legal_T_50 & 41'h90000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _get_legal_T_52 = _get_legal_T_51; // @[Parameters.scala:137:46] wire _get_legal_T_53 = _get_legal_T_52 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _get_legal_T_54 = _get_legal_T_18 | _get_legal_T_23; // @[Parameters.scala:685:42] wire _get_legal_T_55 = _get_legal_T_54 | _get_legal_T_28; // @[Parameters.scala:685:42] wire _get_legal_T_56 = _get_legal_T_55 | _get_legal_T_33; // @[Parameters.scala:685:42] wire _get_legal_T_57 = _get_legal_T_56 | _get_legal_T_38; // @[Parameters.scala:685:42] wire _get_legal_T_58 = _get_legal_T_57 | _get_legal_T_43; // @[Parameters.scala:685:42] wire _get_legal_T_59 = _get_legal_T_58 | _get_legal_T_48; // @[Parameters.scala:685:42] wire _get_legal_T_60 = _get_legal_T_59 | _get_legal_T_53; // @[Parameters.scala:685:42] wire _get_legal_T_61 = _get_legal_T_60; // @[Parameters.scala:684:54, :685:42] wire get_legal = _get_legal_T_62 | _get_legal_T_61; // @[Parameters.scala:684:54, :686:26] wire [15:0] _get_a_mask_T; // @[Misc.scala:222:10] wire [3:0] get_size; // @[Edges.scala:460:17] wire [15:0] get_mask; // @[Edges.scala:460:17] wire [3:0] _GEN_5 = {2'h0, req_uop_mem_size}; // @[Edges.scala:463:15] assign get_size = _GEN_5; // @[Edges.scala:460:17, :463:15] wire [3:0] _get_a_mask_sizeOH_T; // @[Misc.scala:202:34] assign _get_a_mask_sizeOH_T = _GEN_5; // @[Misc.scala:202:34] wire [3:0] put_size; // @[Edges.scala:480:17] assign put_size = _GEN_5; // @[Edges.scala:463:15, :480:17] wire [3:0] _put_a_mask_sizeOH_T; // @[Misc.scala:202:34] assign _put_a_mask_sizeOH_T = _GEN_5; // @[Misc.scala:202:34] wire [3:0] atomics_a_size; // @[Edges.scala:534:17] assign atomics_a_size = _GEN_5; // @[Edges.scala:463:15, :534:17] wire [3:0] _atomics_a_mask_sizeOH_T; // @[Misc.scala:202:34] assign _atomics_a_mask_sizeOH_T = _GEN_5; // @[Misc.scala:202:34] wire [3:0] atomics_a_1_size; // @[Edges.scala:534:17] assign atomics_a_1_size = _GEN_5; // @[Edges.scala:463:15, :534:17] wire [3:0] _atomics_a_mask_sizeOH_T_3; // @[Misc.scala:202:34] assign _atomics_a_mask_sizeOH_T_3 = _GEN_5; // @[Misc.scala:202:34] wire [3:0] atomics_a_2_size; // @[Edges.scala:534:17] assign atomics_a_2_size = _GEN_5; // 
@[Edges.scala:463:15, :534:17] wire [3:0] _atomics_a_mask_sizeOH_T_6; // @[Misc.scala:202:34] assign _atomics_a_mask_sizeOH_T_6 = _GEN_5; // @[Misc.scala:202:34] wire [3:0] atomics_a_3_size; // @[Edges.scala:534:17] assign atomics_a_3_size = _GEN_5; // @[Edges.scala:463:15, :534:17] wire [3:0] _atomics_a_mask_sizeOH_T_9; // @[Misc.scala:202:34] assign _atomics_a_mask_sizeOH_T_9 = _GEN_5; // @[Misc.scala:202:34] wire [3:0] atomics_a_4_size; // @[Edges.scala:517:17] assign atomics_a_4_size = _GEN_5; // @[Edges.scala:463:15, :517:17] wire [3:0] _atomics_a_mask_sizeOH_T_12; // @[Misc.scala:202:34] assign _atomics_a_mask_sizeOH_T_12 = _GEN_5; // @[Misc.scala:202:34] wire [3:0] atomics_a_5_size; // @[Edges.scala:517:17] assign atomics_a_5_size = _GEN_5; // @[Edges.scala:463:15, :517:17] wire [3:0] _atomics_a_mask_sizeOH_T_15; // @[Misc.scala:202:34] assign _atomics_a_mask_sizeOH_T_15 = _GEN_5; // @[Misc.scala:202:34] wire [3:0] atomics_a_6_size; // @[Edges.scala:517:17] assign atomics_a_6_size = _GEN_5; // @[Edges.scala:463:15, :517:17] wire [3:0] _atomics_a_mask_sizeOH_T_18; // @[Misc.scala:202:34] assign _atomics_a_mask_sizeOH_T_18 = _GEN_5; // @[Misc.scala:202:34] wire [3:0] atomics_a_7_size; // @[Edges.scala:517:17] assign atomics_a_7_size = _GEN_5; // @[Edges.scala:463:15, :517:17] wire [3:0] _atomics_a_mask_sizeOH_T_21; // @[Misc.scala:202:34] assign _atomics_a_mask_sizeOH_T_21 = _GEN_5; // @[Misc.scala:202:34] wire [3:0] atomics_a_8_size; // @[Edges.scala:517:17] assign atomics_a_8_size = _GEN_5; // @[Edges.scala:463:15, :517:17] wire [3:0] _atomics_a_mask_sizeOH_T_24; // @[Misc.scala:202:34] assign _atomics_a_mask_sizeOH_T_24 = _GEN_5; // @[Misc.scala:202:34] wire [1:0] get_a_mask_sizeOH_shiftAmount = _get_a_mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _get_a_mask_sizeOH_T_1 = 4'h1 << get_a_mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [3:0] _get_a_mask_sizeOH_T_2 = _get_a_mask_sizeOH_T_1; // @[OneHot.scala:65:{12,27}] wire [3:0] get_a_mask_sizeOH = {_get_a_mask_sizeOH_T_2[3:1], 1'h1}; // @[OneHot.scala:65:27] wire get_a_mask_sub_sub_sub_size = get_a_mask_sizeOH[3]; // @[Misc.scala:202:81, :209:26] wire get_a_mask_sub_sub_sub_bit = req_addr[3]; // @[Misc.scala:210:26] wire put_a_mask_sub_sub_sub_bit = req_addr[3]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_sub_bit = req_addr[3]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_sub_bit_1 = req_addr[3]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_sub_bit_2 = req_addr[3]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_sub_bit_3 = req_addr[3]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_sub_bit_4 = req_addr[3]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_sub_bit_5 = req_addr[3]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_sub_bit_6 = req_addr[3]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_sub_bit_7 = req_addr[3]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_sub_bit_8 = req_addr[3]; // @[Misc.scala:210:26] wire _grant_word_shift_T = req_addr[3]; // @[package.scala:163:13] wire get_a_mask_sub_sub_sub_1_2 = get_a_mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire get_a_mask_sub_sub_sub_nbit = ~get_a_mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire get_a_mask_sub_sub_sub_0_2 = get_a_mask_sub_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_sub_sub_sub_acc_T = get_a_mask_sub_sub_sub_size & get_a_mask_sub_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_sub_sub_0_1 = 
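  // The *_legal wires above appear to be TileLink address-set containment checks
  // (the Parameters.scala:137 idiom: ((addr ^ base) & mask) == 41'h0), OR-reduced over
  // the managers that can service the Get / Put / atomic request. The
  // get_a_mask_* / put_a_mask_* / atomics_a_mask_* wires below look like the
  // flattened Misc.scala byte-mask tree: one level per low address bit, with the
  // *_acc terms forcing full-lane coverage once the one-hot size reaches that level.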
_get_a_mask_sub_sub_sub_acc_T; // @[Misc.scala:215:{29,38}] wire _get_a_mask_sub_sub_sub_acc_T_1 = get_a_mask_sub_sub_sub_size & get_a_mask_sub_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_sub_sub_1_1 = _get_a_mask_sub_sub_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire get_a_mask_sub_sub_size = get_a_mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire get_a_mask_sub_sub_bit = req_addr[2]; // @[Misc.scala:210:26] wire put_a_mask_sub_sub_bit = req_addr[2]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_bit = req_addr[2]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_bit_1 = req_addr[2]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_bit_2 = req_addr[2]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_bit_3 = req_addr[2]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_bit_4 = req_addr[2]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_bit_5 = req_addr[2]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_bit_6 = req_addr[2]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_bit_7 = req_addr[2]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_sub_bit_8 = req_addr[2]; // @[Misc.scala:210:26] wire _io_resp_bits_data_shifted_T = req_addr[2]; // @[Misc.scala:210:26] wire get_a_mask_sub_sub_nbit = ~get_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire get_a_mask_sub_sub_0_2 = get_a_mask_sub_sub_sub_0_2 & get_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_sub_sub_acc_T = get_a_mask_sub_sub_size & get_a_mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_sub_0_1 = get_a_mask_sub_sub_sub_0_1 | _get_a_mask_sub_sub_acc_T; // @[Misc.scala:215:{29,38}] wire get_a_mask_sub_sub_1_2 = get_a_mask_sub_sub_sub_0_2 & get_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_sub_sub_acc_T_1 = get_a_mask_sub_sub_size & get_a_mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_sub_1_1 = get_a_mask_sub_sub_sub_0_1 | _get_a_mask_sub_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire get_a_mask_sub_sub_2_2 = get_a_mask_sub_sub_sub_1_2 & get_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_sub_sub_acc_T_2 = get_a_mask_sub_sub_size & get_a_mask_sub_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_sub_2_1 = get_a_mask_sub_sub_sub_1_1 | _get_a_mask_sub_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire get_a_mask_sub_sub_3_2 = get_a_mask_sub_sub_sub_1_2 & get_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_sub_sub_acc_T_3 = get_a_mask_sub_sub_size & get_a_mask_sub_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_sub_3_1 = get_a_mask_sub_sub_sub_1_1 | _get_a_mask_sub_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire get_a_mask_sub_size = get_a_mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire get_a_mask_sub_bit = req_addr[1]; // @[Misc.scala:210:26] wire put_a_mask_sub_bit = req_addr[1]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_bit = req_addr[1]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_bit_1 = req_addr[1]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_bit_2 = req_addr[1]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_bit_3 = req_addr[1]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_bit_4 = req_addr[1]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_bit_5 = req_addr[1]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_bit_6 = req_addr[1]; // @[Misc.scala:210:26] wire atomics_a_mask_sub_bit_7 = req_addr[1]; // @[Misc.scala:210:26] wire 
atomics_a_mask_sub_bit_8 = req_addr[1]; // @[Misc.scala:210:26] wire _io_resp_bits_data_shifted_T_3 = req_addr[1]; // @[Misc.scala:210:26] wire get_a_mask_sub_nbit = ~get_a_mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire get_a_mask_sub_0_2 = get_a_mask_sub_sub_0_2 & get_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_sub_acc_T = get_a_mask_sub_size & get_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_0_1 = get_a_mask_sub_sub_0_1 | _get_a_mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire get_a_mask_sub_1_2 = get_a_mask_sub_sub_0_2 & get_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_sub_acc_T_1 = get_a_mask_sub_size & get_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_1_1 = get_a_mask_sub_sub_0_1 | _get_a_mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire get_a_mask_sub_2_2 = get_a_mask_sub_sub_1_2 & get_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_sub_acc_T_2 = get_a_mask_sub_size & get_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_2_1 = get_a_mask_sub_sub_1_1 | _get_a_mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire get_a_mask_sub_3_2 = get_a_mask_sub_sub_1_2 & get_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_sub_acc_T_3 = get_a_mask_sub_size & get_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_3_1 = get_a_mask_sub_sub_1_1 | _get_a_mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire get_a_mask_sub_4_2 = get_a_mask_sub_sub_2_2 & get_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_sub_acc_T_4 = get_a_mask_sub_size & get_a_mask_sub_4_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_4_1 = get_a_mask_sub_sub_2_1 | _get_a_mask_sub_acc_T_4; // @[Misc.scala:215:{29,38}] wire get_a_mask_sub_5_2 = get_a_mask_sub_sub_2_2 & get_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_sub_acc_T_5 = get_a_mask_sub_size & get_a_mask_sub_5_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_5_1 = get_a_mask_sub_sub_2_1 | _get_a_mask_sub_acc_T_5; // @[Misc.scala:215:{29,38}] wire get_a_mask_sub_6_2 = get_a_mask_sub_sub_3_2 & get_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_sub_acc_T_6 = get_a_mask_sub_size & get_a_mask_sub_6_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_6_1 = get_a_mask_sub_sub_3_1 | _get_a_mask_sub_acc_T_6; // @[Misc.scala:215:{29,38}] wire get_a_mask_sub_7_2 = get_a_mask_sub_sub_3_2 & get_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_sub_acc_T_7 = get_a_mask_sub_size & get_a_mask_sub_7_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_sub_7_1 = get_a_mask_sub_sub_3_1 | _get_a_mask_sub_acc_T_7; // @[Misc.scala:215:{29,38}] wire get_a_mask_size = get_a_mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire get_a_mask_bit = req_addr[0]; // @[Misc.scala:210:26] wire put_a_mask_bit = req_addr[0]; // @[Misc.scala:210:26] wire atomics_a_mask_bit = req_addr[0]; // @[Misc.scala:210:26] wire atomics_a_mask_bit_1 = req_addr[0]; // @[Misc.scala:210:26] wire atomics_a_mask_bit_2 = req_addr[0]; // @[Misc.scala:210:26] wire atomics_a_mask_bit_3 = req_addr[0]; // @[Misc.scala:210:26] wire atomics_a_mask_bit_4 = req_addr[0]; // @[Misc.scala:210:26] wire atomics_a_mask_bit_5 = req_addr[0]; // @[Misc.scala:210:26] wire atomics_a_mask_bit_6 = req_addr[0]; // @[Misc.scala:210:26] wire atomics_a_mask_bit_7 = req_addr[0]; // @[Misc.scala:210:26] wire atomics_a_mask_bit_8 = 
req_addr[0]; // @[Misc.scala:210:26] wire _io_resp_bits_data_shifted_T_6 = req_addr[0]; // @[Misc.scala:210:26] wire get_a_mask_nbit = ~get_a_mask_bit; // @[Misc.scala:210:26, :211:20] wire get_a_mask_eq = get_a_mask_sub_0_2 & get_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_acc_T = get_a_mask_size & get_a_mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc = get_a_mask_sub_0_1 | _get_a_mask_acc_T; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_1 = get_a_mask_sub_0_2 & get_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_acc_T_1 = get_a_mask_size & get_a_mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_1 = get_a_mask_sub_0_1 | _get_a_mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_2 = get_a_mask_sub_1_2 & get_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_acc_T_2 = get_a_mask_size & get_a_mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_2 = get_a_mask_sub_1_1 | _get_a_mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_3 = get_a_mask_sub_1_2 & get_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_acc_T_3 = get_a_mask_size & get_a_mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_3 = get_a_mask_sub_1_1 | _get_a_mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_4 = get_a_mask_sub_2_2 & get_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_acc_T_4 = get_a_mask_size & get_a_mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_4 = get_a_mask_sub_2_1 | _get_a_mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_5 = get_a_mask_sub_2_2 & get_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_acc_T_5 = get_a_mask_size & get_a_mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_5 = get_a_mask_sub_2_1 | _get_a_mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_6 = get_a_mask_sub_3_2 & get_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_acc_T_6 = get_a_mask_size & get_a_mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_6 = get_a_mask_sub_3_1 | _get_a_mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_7 = get_a_mask_sub_3_2 & get_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_acc_T_7 = get_a_mask_size & get_a_mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_7 = get_a_mask_sub_3_1 | _get_a_mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_8 = get_a_mask_sub_4_2 & get_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_acc_T_8 = get_a_mask_size & get_a_mask_eq_8; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_8 = get_a_mask_sub_4_1 | _get_a_mask_acc_T_8; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_9 = get_a_mask_sub_4_2 & get_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_acc_T_9 = get_a_mask_size & get_a_mask_eq_9; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_9 = get_a_mask_sub_4_1 | _get_a_mask_acc_T_9; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_10 = get_a_mask_sub_5_2 & get_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_acc_T_10 = get_a_mask_size & get_a_mask_eq_10; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_10 = get_a_mask_sub_5_1 | _get_a_mask_acc_T_10; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_11 = get_a_mask_sub_5_2 & get_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_acc_T_11 = get_a_mask_size & 
get_a_mask_eq_11; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_11 = get_a_mask_sub_5_1 | _get_a_mask_acc_T_11; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_12 = get_a_mask_sub_6_2 & get_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_acc_T_12 = get_a_mask_size & get_a_mask_eq_12; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_12 = get_a_mask_sub_6_1 | _get_a_mask_acc_T_12; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_13 = get_a_mask_sub_6_2 & get_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_acc_T_13 = get_a_mask_size & get_a_mask_eq_13; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_13 = get_a_mask_sub_6_1 | _get_a_mask_acc_T_13; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_14 = get_a_mask_sub_7_2 & get_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _get_a_mask_acc_T_14 = get_a_mask_size & get_a_mask_eq_14; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_14 = get_a_mask_sub_7_1 | _get_a_mask_acc_T_14; // @[Misc.scala:215:{29,38}] wire get_a_mask_eq_15 = get_a_mask_sub_7_2 & get_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _get_a_mask_acc_T_15 = get_a_mask_size & get_a_mask_eq_15; // @[Misc.scala:209:26, :214:27, :215:38] wire get_a_mask_acc_15 = get_a_mask_sub_7_1 | _get_a_mask_acc_T_15; // @[Misc.scala:215:{29,38}] wire [1:0] get_a_mask_lo_lo_lo = {get_a_mask_acc_1, get_a_mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] get_a_mask_lo_lo_hi = {get_a_mask_acc_3, get_a_mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] get_a_mask_lo_lo = {get_a_mask_lo_lo_hi, get_a_mask_lo_lo_lo}; // @[Misc.scala:222:10] wire [1:0] get_a_mask_lo_hi_lo = {get_a_mask_acc_5, get_a_mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] get_a_mask_lo_hi_hi = {get_a_mask_acc_7, get_a_mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] get_a_mask_lo_hi = {get_a_mask_lo_hi_hi, get_a_mask_lo_hi_lo}; // @[Misc.scala:222:10] wire [7:0] get_a_mask_lo = {get_a_mask_lo_hi, get_a_mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] get_a_mask_hi_lo_lo = {get_a_mask_acc_9, get_a_mask_acc_8}; // @[Misc.scala:215:29, :222:10] wire [1:0] get_a_mask_hi_lo_hi = {get_a_mask_acc_11, get_a_mask_acc_10}; // @[Misc.scala:215:29, :222:10] wire [3:0] get_a_mask_hi_lo = {get_a_mask_hi_lo_hi, get_a_mask_hi_lo_lo}; // @[Misc.scala:222:10] wire [1:0] get_a_mask_hi_hi_lo = {get_a_mask_acc_13, get_a_mask_acc_12}; // @[Misc.scala:215:29, :222:10] wire [1:0] get_a_mask_hi_hi_hi = {get_a_mask_acc_15, get_a_mask_acc_14}; // @[Misc.scala:215:29, :222:10] wire [3:0] get_a_mask_hi_hi = {get_a_mask_hi_hi_hi, get_a_mask_hi_hi_lo}; // @[Misc.scala:222:10] wire [7:0] get_a_mask_hi = {get_a_mask_hi_hi, get_a_mask_hi_lo}; // @[Misc.scala:222:10] assign _get_a_mask_T = {get_a_mask_hi, get_a_mask_lo}; // @[Misc.scala:222:10] assign get_mask = _get_a_mask_T; // @[Misc.scala:222:10] wire [40:0] _put_legal_T_5 = {1'h0, _put_legal_T_4}; // @[Parameters.scala:137:{31,41}] wire [40:0] _put_legal_T_6 = _put_legal_T_5 & 41'h9A113000; // @[Parameters.scala:137:{41,46}] wire [40:0] _put_legal_T_7 = _put_legal_T_6; // @[Parameters.scala:137:46] wire _put_legal_T_8 = _put_legal_T_7 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _put_legal_T_9 = _put_legal_T_8; // @[Parameters.scala:684:54] wire _put_legal_T_69 = _put_legal_T_9; // @[Parameters.scala:684:54, :686:26] wire [40:0] _put_legal_T_15 = {1'h0, _put_legal_T_14}; // @[Parameters.scala:137:{31,41}] wire [40:0] _put_legal_T_16 = _put_legal_T_15 & 41'h9A112000; // 
@[Parameters.scala:137:{41,46}] wire [40:0] _put_legal_T_17 = _put_legal_T_16; // @[Parameters.scala:137:46] wire _put_legal_T_18 = _put_legal_T_17 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _GEN_6 = {req_addr[39:21], req_addr[20:0] ^ 21'h100000}; // @[Parameters.scala:137:31] wire [39:0] _put_legal_T_19; // @[Parameters.scala:137:31] assign _put_legal_T_19 = _GEN_6; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_9; // @[Parameters.scala:137:31] assign _atomics_legal_T_9 = _GEN_6; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_68; // @[Parameters.scala:137:31] assign _atomics_legal_T_68 = _GEN_6; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_127; // @[Parameters.scala:137:31] assign _atomics_legal_T_127 = _GEN_6; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_186; // @[Parameters.scala:137:31] assign _atomics_legal_T_186 = _GEN_6; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_245; // @[Parameters.scala:137:31] assign _atomics_legal_T_245 = _GEN_6; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_304; // @[Parameters.scala:137:31] assign _atomics_legal_T_304 = _GEN_6; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_363; // @[Parameters.scala:137:31] assign _atomics_legal_T_363 = _GEN_6; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_422; // @[Parameters.scala:137:31] assign _atomics_legal_T_422 = _GEN_6; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_481; // @[Parameters.scala:137:31] assign _atomics_legal_T_481 = _GEN_6; // @[Parameters.scala:137:31] wire [40:0] _put_legal_T_20 = {1'h0, _put_legal_T_19}; // @[Parameters.scala:137:{31,41}] wire [40:0] _put_legal_T_21 = _put_legal_T_20 & 41'h9A103000; // @[Parameters.scala:137:{41,46}] wire [40:0] _put_legal_T_22 = _put_legal_T_21; // @[Parameters.scala:137:46] wire _put_legal_T_23 = _put_legal_T_22 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _put_legal_T_25 = {1'h0, _put_legal_T_24}; // @[Parameters.scala:137:{31,41}] wire [40:0] _put_legal_T_26 = _put_legal_T_25 & 41'h9A110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _put_legal_T_27 = _put_legal_T_26; // @[Parameters.scala:137:46] wire _put_legal_T_28 = _put_legal_T_27 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _GEN_7 = {req_addr[39:26], req_addr[25:0] ^ 26'h2010000}; // @[Parameters.scala:137:31] wire [39:0] _put_legal_T_29; // @[Parameters.scala:137:31] assign _put_legal_T_29 = _GEN_7; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_14; // @[Parameters.scala:137:31] assign _atomics_legal_T_14 = _GEN_7; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_73; // @[Parameters.scala:137:31] assign _atomics_legal_T_73 = _GEN_7; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_132; // @[Parameters.scala:137:31] assign _atomics_legal_T_132 = _GEN_7; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_191; // @[Parameters.scala:137:31] assign _atomics_legal_T_191 = _GEN_7; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_250; // @[Parameters.scala:137:31] assign _atomics_legal_T_250 = _GEN_7; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_309; // @[Parameters.scala:137:31] assign _atomics_legal_T_309 = _GEN_7; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_368; // @[Parameters.scala:137:31] assign _atomics_legal_T_368 = _GEN_7; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_427; // @[Parameters.scala:137:31] assign _atomics_legal_T_427 = _GEN_7; // 
@[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_486; // @[Parameters.scala:137:31] assign _atomics_legal_T_486 = _GEN_7; // @[Parameters.scala:137:31] wire [40:0] _put_legal_T_30 = {1'h0, _put_legal_T_29}; // @[Parameters.scala:137:{31,41}] wire [40:0] _put_legal_T_31 = _put_legal_T_30 & 41'h9A113000; // @[Parameters.scala:137:{41,46}] wire [40:0] _put_legal_T_32 = _put_legal_T_31; // @[Parameters.scala:137:46] wire _put_legal_T_33 = _put_legal_T_32 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _put_legal_T_35 = {1'h0, _put_legal_T_34}; // @[Parameters.scala:137:{31,41}] wire [40:0] _put_legal_T_36 = _put_legal_T_35 & 41'h98000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _put_legal_T_37 = _put_legal_T_36; // @[Parameters.scala:137:46] wire _put_legal_T_38 = _put_legal_T_37 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _put_legal_T_40 = {1'h0, _put_legal_T_39}; // @[Parameters.scala:137:{31,41}] wire [40:0] _put_legal_T_41 = _put_legal_T_40 & 41'h9A110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _put_legal_T_42 = _put_legal_T_41; // @[Parameters.scala:137:46] wire _put_legal_T_43 = _put_legal_T_42 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _put_legal_T_45 = {1'h0, _put_legal_T_44}; // @[Parameters.scala:137:{31,41}] wire [40:0] _put_legal_T_46 = _put_legal_T_45 & 41'h9A113000; // @[Parameters.scala:137:{41,46}] wire [40:0] _put_legal_T_47 = _put_legal_T_46; // @[Parameters.scala:137:46] wire _put_legal_T_48 = _put_legal_T_47 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _put_legal_T_50 = {1'h0, _put_legal_T_49}; // @[Parameters.scala:137:{31,41}] wire [40:0] _put_legal_T_51 = _put_legal_T_50 & 41'h90000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _put_legal_T_52 = _put_legal_T_51; // @[Parameters.scala:137:46] wire _put_legal_T_53 = _put_legal_T_52 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _put_legal_T_54 = _put_legal_T_18 | _put_legal_T_23; // @[Parameters.scala:685:42] wire _put_legal_T_55 = _put_legal_T_54 | _put_legal_T_28; // @[Parameters.scala:685:42] wire _put_legal_T_56 = _put_legal_T_55 | _put_legal_T_33; // @[Parameters.scala:685:42] wire _put_legal_T_57 = _put_legal_T_56 | _put_legal_T_38; // @[Parameters.scala:685:42] wire _put_legal_T_58 = _put_legal_T_57 | _put_legal_T_43; // @[Parameters.scala:685:42] wire _put_legal_T_59 = _put_legal_T_58 | _put_legal_T_48; // @[Parameters.scala:685:42] wire _put_legal_T_60 = _put_legal_T_59 | _put_legal_T_53; // @[Parameters.scala:685:42] wire _put_legal_T_61 = _put_legal_T_60; // @[Parameters.scala:684:54, :685:42] wire [40:0] _put_legal_T_64 = {1'h0, _put_legal_T_63}; // @[Parameters.scala:137:{31,41}] wire [40:0] _put_legal_T_65 = _put_legal_T_64 & 41'h9A110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _put_legal_T_66 = _put_legal_T_65; // @[Parameters.scala:137:46] wire _put_legal_T_67 = _put_legal_T_66 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _put_legal_T_70 = _put_legal_T_69 | _put_legal_T_61; // @[Parameters.scala:684:54, :686:26] wire put_legal = _put_legal_T_70; // @[Parameters.scala:686:26] wire [15:0] _put_a_mask_T; // @[Misc.scala:222:10] wire [15:0] put_mask; // @[Edges.scala:480:17] wire [1:0] put_a_mask_sizeOH_shiftAmount = _put_a_mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _put_a_mask_sizeOH_T_1 = 4'h1 << put_a_mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [3:0] _put_a_mask_sizeOH_T_2 = _put_a_mask_sizeOH_T_1; // @[OneHot.scala:65:{12,27}] wire [3:0] put_a_mask_sizeOH = {_put_a_mask_sizeOH_T_2[3:1], 
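  // put_legal above repeats the same containment structure as get_legal, presumably
  // against the managers that accept Puts (note the different 41'h... mask constants);
  // the put_a_mask_* tree below rebuilds the 16-bit write mask for the Put message.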
1'h1}; // @[OneHot.scala:65:27] wire put_a_mask_sub_sub_sub_size = put_a_mask_sizeOH[3]; // @[Misc.scala:202:81, :209:26] wire put_a_mask_sub_sub_sub_1_2 = put_a_mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire put_a_mask_sub_sub_sub_nbit = ~put_a_mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire put_a_mask_sub_sub_sub_0_2 = put_a_mask_sub_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_sub_sub_sub_acc_T = put_a_mask_sub_sub_sub_size & put_a_mask_sub_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_sub_sub_0_1 = _put_a_mask_sub_sub_sub_acc_T; // @[Misc.scala:215:{29,38}] wire _put_a_mask_sub_sub_sub_acc_T_1 = put_a_mask_sub_sub_sub_size & put_a_mask_sub_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_sub_sub_1_1 = _put_a_mask_sub_sub_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire put_a_mask_sub_sub_size = put_a_mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire put_a_mask_sub_sub_nbit = ~put_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire put_a_mask_sub_sub_0_2 = put_a_mask_sub_sub_sub_0_2 & put_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_sub_sub_acc_T = put_a_mask_sub_sub_size & put_a_mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_sub_0_1 = put_a_mask_sub_sub_sub_0_1 | _put_a_mask_sub_sub_acc_T; // @[Misc.scala:215:{29,38}] wire put_a_mask_sub_sub_1_2 = put_a_mask_sub_sub_sub_0_2 & put_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_sub_sub_acc_T_1 = put_a_mask_sub_sub_size & put_a_mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_sub_1_1 = put_a_mask_sub_sub_sub_0_1 | _put_a_mask_sub_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire put_a_mask_sub_sub_2_2 = put_a_mask_sub_sub_sub_1_2 & put_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_sub_sub_acc_T_2 = put_a_mask_sub_sub_size & put_a_mask_sub_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_sub_2_1 = put_a_mask_sub_sub_sub_1_1 | _put_a_mask_sub_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire put_a_mask_sub_sub_3_2 = put_a_mask_sub_sub_sub_1_2 & put_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_sub_sub_acc_T_3 = put_a_mask_sub_sub_size & put_a_mask_sub_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_sub_3_1 = put_a_mask_sub_sub_sub_1_1 | _put_a_mask_sub_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire put_a_mask_sub_size = put_a_mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire put_a_mask_sub_nbit = ~put_a_mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire put_a_mask_sub_0_2 = put_a_mask_sub_sub_0_2 & put_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_sub_acc_T = put_a_mask_sub_size & put_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_0_1 = put_a_mask_sub_sub_0_1 | _put_a_mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire put_a_mask_sub_1_2 = put_a_mask_sub_sub_0_2 & put_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_sub_acc_T_1 = put_a_mask_sub_size & put_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_1_1 = put_a_mask_sub_sub_0_1 | _put_a_mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire put_a_mask_sub_2_2 = put_a_mask_sub_sub_1_2 & put_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_sub_acc_T_2 = put_a_mask_sub_size & put_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_2_1 = 
put_a_mask_sub_sub_1_1 | _put_a_mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire put_a_mask_sub_3_2 = put_a_mask_sub_sub_1_2 & put_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_sub_acc_T_3 = put_a_mask_sub_size & put_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_3_1 = put_a_mask_sub_sub_1_1 | _put_a_mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire put_a_mask_sub_4_2 = put_a_mask_sub_sub_2_2 & put_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_sub_acc_T_4 = put_a_mask_sub_size & put_a_mask_sub_4_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_4_1 = put_a_mask_sub_sub_2_1 | _put_a_mask_sub_acc_T_4; // @[Misc.scala:215:{29,38}] wire put_a_mask_sub_5_2 = put_a_mask_sub_sub_2_2 & put_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_sub_acc_T_5 = put_a_mask_sub_size & put_a_mask_sub_5_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_5_1 = put_a_mask_sub_sub_2_1 | _put_a_mask_sub_acc_T_5; // @[Misc.scala:215:{29,38}] wire put_a_mask_sub_6_2 = put_a_mask_sub_sub_3_2 & put_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_sub_acc_T_6 = put_a_mask_sub_size & put_a_mask_sub_6_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_6_1 = put_a_mask_sub_sub_3_1 | _put_a_mask_sub_acc_T_6; // @[Misc.scala:215:{29,38}] wire put_a_mask_sub_7_2 = put_a_mask_sub_sub_3_2 & put_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_sub_acc_T_7 = put_a_mask_sub_size & put_a_mask_sub_7_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_sub_7_1 = put_a_mask_sub_sub_3_1 | _put_a_mask_sub_acc_T_7; // @[Misc.scala:215:{29,38}] wire put_a_mask_size = put_a_mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire put_a_mask_nbit = ~put_a_mask_bit; // @[Misc.scala:210:26, :211:20] wire put_a_mask_eq = put_a_mask_sub_0_2 & put_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_acc_T = put_a_mask_size & put_a_mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc = put_a_mask_sub_0_1 | _put_a_mask_acc_T; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_1 = put_a_mask_sub_0_2 & put_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_acc_T_1 = put_a_mask_size & put_a_mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_1 = put_a_mask_sub_0_1 | _put_a_mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_2 = put_a_mask_sub_1_2 & put_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_acc_T_2 = put_a_mask_size & put_a_mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_2 = put_a_mask_sub_1_1 | _put_a_mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_3 = put_a_mask_sub_1_2 & put_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_acc_T_3 = put_a_mask_size & put_a_mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_3 = put_a_mask_sub_1_1 | _put_a_mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_4 = put_a_mask_sub_2_2 & put_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_acc_T_4 = put_a_mask_size & put_a_mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_4 = put_a_mask_sub_2_1 | _put_a_mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_5 = put_a_mask_sub_2_2 & put_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_acc_T_5 = put_a_mask_size & put_a_mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_5 = put_a_mask_sub_2_1 | 
_put_a_mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_6 = put_a_mask_sub_3_2 & put_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_acc_T_6 = put_a_mask_size & put_a_mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_6 = put_a_mask_sub_3_1 | _put_a_mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_7 = put_a_mask_sub_3_2 & put_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_acc_T_7 = put_a_mask_size & put_a_mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_7 = put_a_mask_sub_3_1 | _put_a_mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_8 = put_a_mask_sub_4_2 & put_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_acc_T_8 = put_a_mask_size & put_a_mask_eq_8; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_8 = put_a_mask_sub_4_1 | _put_a_mask_acc_T_8; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_9 = put_a_mask_sub_4_2 & put_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_acc_T_9 = put_a_mask_size & put_a_mask_eq_9; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_9 = put_a_mask_sub_4_1 | _put_a_mask_acc_T_9; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_10 = put_a_mask_sub_5_2 & put_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_acc_T_10 = put_a_mask_size & put_a_mask_eq_10; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_10 = put_a_mask_sub_5_1 | _put_a_mask_acc_T_10; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_11 = put_a_mask_sub_5_2 & put_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_acc_T_11 = put_a_mask_size & put_a_mask_eq_11; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_11 = put_a_mask_sub_5_1 | _put_a_mask_acc_T_11; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_12 = put_a_mask_sub_6_2 & put_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_acc_T_12 = put_a_mask_size & put_a_mask_eq_12; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_12 = put_a_mask_sub_6_1 | _put_a_mask_acc_T_12; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_13 = put_a_mask_sub_6_2 & put_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_acc_T_13 = put_a_mask_size & put_a_mask_eq_13; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_13 = put_a_mask_sub_6_1 | _put_a_mask_acc_T_13; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_14 = put_a_mask_sub_7_2 & put_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _put_a_mask_acc_T_14 = put_a_mask_size & put_a_mask_eq_14; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_14 = put_a_mask_sub_7_1 | _put_a_mask_acc_T_14; // @[Misc.scala:215:{29,38}] wire put_a_mask_eq_15 = put_a_mask_sub_7_2 & put_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _put_a_mask_acc_T_15 = put_a_mask_size & put_a_mask_eq_15; // @[Misc.scala:209:26, :214:27, :215:38] wire put_a_mask_acc_15 = put_a_mask_sub_7_1 | _put_a_mask_acc_T_15; // @[Misc.scala:215:{29,38}] wire [1:0] put_a_mask_lo_lo_lo = {put_a_mask_acc_1, put_a_mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] put_a_mask_lo_lo_hi = {put_a_mask_acc_3, put_a_mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] put_a_mask_lo_lo = {put_a_mask_lo_lo_hi, put_a_mask_lo_lo_lo}; // @[Misc.scala:222:10] wire [1:0] put_a_mask_lo_hi_lo = {put_a_mask_acc_5, put_a_mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] put_a_mask_lo_hi_hi = {put_a_mask_acc_7, put_a_mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] put_a_mask_lo_hi = 
{put_a_mask_lo_hi_hi, put_a_mask_lo_hi_lo}; // @[Misc.scala:222:10] wire [7:0] put_a_mask_lo = {put_a_mask_lo_hi, put_a_mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] put_a_mask_hi_lo_lo = {put_a_mask_acc_9, put_a_mask_acc_8}; // @[Misc.scala:215:29, :222:10] wire [1:0] put_a_mask_hi_lo_hi = {put_a_mask_acc_11, put_a_mask_acc_10}; // @[Misc.scala:215:29, :222:10] wire [3:0] put_a_mask_hi_lo = {put_a_mask_hi_lo_hi, put_a_mask_hi_lo_lo}; // @[Misc.scala:222:10] wire [1:0] put_a_mask_hi_hi_lo = {put_a_mask_acc_13, put_a_mask_acc_12}; // @[Misc.scala:215:29, :222:10] wire [1:0] put_a_mask_hi_hi_hi = {put_a_mask_acc_15, put_a_mask_acc_14}; // @[Misc.scala:215:29, :222:10] wire [3:0] put_a_mask_hi_hi = {put_a_mask_hi_hi_hi, put_a_mask_hi_hi_lo}; // @[Misc.scala:222:10] wire [7:0] put_a_mask_hi = {put_a_mask_hi_hi, put_a_mask_hi_lo}; // @[Misc.scala:222:10] assign _put_a_mask_T = {put_a_mask_hi, put_a_mask_lo}; // @[Misc.scala:222:10] assign put_mask = _put_a_mask_T; // @[Misc.scala:222:10] wire [40:0] _atomics_legal_T_5 = {1'h0, _atomics_legal_T_4}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_6 = _atomics_legal_T_5 & 41'h9C110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_7 = _atomics_legal_T_6; // @[Parameters.scala:137:46] wire _atomics_legal_T_8 = _atomics_legal_T_7 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_10 = {1'h0, _atomics_legal_T_9}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_11 = _atomics_legal_T_10 & 41'h9E101000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_12 = _atomics_legal_T_11; // @[Parameters.scala:137:46] wire _atomics_legal_T_13 = _atomics_legal_T_12 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_15 = {1'h0, _atomics_legal_T_14}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_16 = _atomics_legal_T_15 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_17 = _atomics_legal_T_16; // @[Parameters.scala:137:46] wire _atomics_legal_T_18 = _atomics_legal_T_17 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _GEN_8 = {req_addr[39:28], req_addr[27:0] ^ 28'hC000000}; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_19; // @[Parameters.scala:137:31] assign _atomics_legal_T_19 = _GEN_8; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_78; // @[Parameters.scala:137:31] assign _atomics_legal_T_78 = _GEN_8; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_137; // @[Parameters.scala:137:31] assign _atomics_legal_T_137 = _GEN_8; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_196; // @[Parameters.scala:137:31] assign _atomics_legal_T_196 = _GEN_8; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_255; // @[Parameters.scala:137:31] assign _atomics_legal_T_255 = _GEN_8; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_314; // @[Parameters.scala:137:31] assign _atomics_legal_T_314 = _GEN_8; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_373; // @[Parameters.scala:137:31] assign _atomics_legal_T_373 = _GEN_8; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_432; // @[Parameters.scala:137:31] assign _atomics_legal_T_432 = _GEN_8; // @[Parameters.scala:137:31] wire [39:0] _atomics_legal_T_491; // @[Parameters.scala:137:31] assign _atomics_legal_T_491 = _GEN_8; // @[Parameters.scala:137:31] wire [40:0] _atomics_legal_T_20 = {1'h0, _atomics_legal_T_19}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_21 = 
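  // The atomics_legal_* terms (beginning above, continuing below) appear to check the
  // request address against the managers that support TileLink Arithmetic/Logical
  // atomics; the atomics_a_mask_* tree that follows is the same byte-mask generator
  // applied to the atomic channel-A request.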
_atomics_legal_T_20 & 41'h9C000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_22 = _atomics_legal_T_21; // @[Parameters.scala:137:46] wire _atomics_legal_T_23 = _atomics_legal_T_22 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_25 = {1'h0, _atomics_legal_T_24}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_26 = _atomics_legal_T_25 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_27 = _atomics_legal_T_26; // @[Parameters.scala:137:46] wire _atomics_legal_T_28 = _atomics_legal_T_27 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_29 = _atomics_legal_T_8 | _atomics_legal_T_13; // @[Parameters.scala:685:42] wire _atomics_legal_T_30 = _atomics_legal_T_29 | _atomics_legal_T_18; // @[Parameters.scala:685:42] wire _atomics_legal_T_31 = _atomics_legal_T_30 | _atomics_legal_T_23; // @[Parameters.scala:685:42] wire _atomics_legal_T_32 = _atomics_legal_T_31 | _atomics_legal_T_28; // @[Parameters.scala:685:42] wire _atomics_legal_T_33 = _atomics_legal_T_32; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_57 = _atomics_legal_T_33; // @[Parameters.scala:684:54, :686:26] wire [40:0] _atomics_legal_T_36 = {1'h0, _atomics_legal_T_35}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_37 = _atomics_legal_T_36 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_38 = _atomics_legal_T_37; // @[Parameters.scala:137:46] wire _atomics_legal_T_39 = _atomics_legal_T_38 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_46 = {1'h0, _atomics_legal_T_45}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_47 = _atomics_legal_T_46 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_48 = _atomics_legal_T_47; // @[Parameters.scala:137:46] wire _atomics_legal_T_49 = _atomics_legal_T_48 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_51 = {1'h0, _atomics_legal_T_50}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_52 = _atomics_legal_T_51 & 41'h90000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_53 = _atomics_legal_T_52; // @[Parameters.scala:137:46] wire _atomics_legal_T_54 = _atomics_legal_T_53 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_55 = _atomics_legal_T_49 | _atomics_legal_T_54; // @[Parameters.scala:685:42] wire _atomics_legal_T_56 = _atomics_legal_T_55; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_58 = _atomics_legal_T_57; // @[Parameters.scala:686:26] wire atomics_legal = _atomics_legal_T_58 | _atomics_legal_T_56; // @[Parameters.scala:684:54, :686:26] wire [15:0] _atomics_a_mask_T; // @[Misc.scala:222:10] wire [15:0] atomics_a_mask; // @[Edges.scala:534:17] wire [1:0] atomics_a_mask_sizeOH_shiftAmount = _atomics_a_mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _atomics_a_mask_sizeOH_T_1 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [3:0] _atomics_a_mask_sizeOH_T_2 = _atomics_a_mask_sizeOH_T_1; // @[OneHot.scala:65:{12,27}] wire [3:0] atomics_a_mask_sizeOH = {_atomics_a_mask_sizeOH_T_2[3:1], 1'h1}; // @[OneHot.scala:65:27] wire atomics_a_mask_sub_sub_sub_size = atomics_a_mask_sizeOH[3]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_sub_1_2 = atomics_a_mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_sub_sub_nbit = ~atomics_a_mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire 
atomics_a_mask_sub_sub_sub_0_2 = atomics_a_mask_sub_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_sub_acc_T = atomics_a_mask_sub_sub_sub_size & atomics_a_mask_sub_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_0_1 = _atomics_a_mask_sub_sub_sub_acc_T; // @[Misc.scala:215:{29,38}] wire _atomics_a_mask_sub_sub_sub_acc_T_1 = atomics_a_mask_sub_sub_sub_size & atomics_a_mask_sub_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_1_1 = _atomics_a_mask_sub_sub_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_size = atomics_a_mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_nbit = ~atomics_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_0_2 = atomics_a_mask_sub_sub_sub_0_2 & atomics_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T = atomics_a_mask_sub_sub_size & atomics_a_mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_0_1 = atomics_a_mask_sub_sub_sub_0_1 | _atomics_a_mask_sub_sub_acc_T; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_1_2 = atomics_a_mask_sub_sub_sub_0_2 & atomics_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_1 = atomics_a_mask_sub_sub_size & atomics_a_mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_1_1 = atomics_a_mask_sub_sub_sub_0_1 | _atomics_a_mask_sub_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_2_2 = atomics_a_mask_sub_sub_sub_1_2 & atomics_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_2 = atomics_a_mask_sub_sub_size & atomics_a_mask_sub_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_2_1 = atomics_a_mask_sub_sub_sub_1_1 | _atomics_a_mask_sub_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_3_2 = atomics_a_mask_sub_sub_sub_1_2 & atomics_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_3 = atomics_a_mask_sub_sub_size & atomics_a_mask_sub_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_3_1 = atomics_a_mask_sub_sub_sub_1_1 | _atomics_a_mask_sub_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_size = atomics_a_mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_nbit = ~atomics_a_mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_0_2 = atomics_a_mask_sub_sub_0_2 & atomics_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T = atomics_a_mask_sub_size & atomics_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_0_1 = atomics_a_mask_sub_sub_0_1 | _atomics_a_mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_1_2 = atomics_a_mask_sub_sub_0_2 & atomics_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_1 = atomics_a_mask_sub_size & atomics_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_1_1 = atomics_a_mask_sub_sub_0_1 | _atomics_a_mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_2_2 = atomics_a_mask_sub_sub_1_2 & atomics_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_2 = atomics_a_mask_sub_size & atomics_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_2_1 = 
atomics_a_mask_sub_sub_1_1 | _atomics_a_mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_3_2 = atomics_a_mask_sub_sub_1_2 & atomics_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_3 = atomics_a_mask_sub_size & atomics_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_3_1 = atomics_a_mask_sub_sub_1_1 | _atomics_a_mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_4_2 = atomics_a_mask_sub_sub_2_2 & atomics_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_4 = atomics_a_mask_sub_size & atomics_a_mask_sub_4_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_4_1 = atomics_a_mask_sub_sub_2_1 | _atomics_a_mask_sub_acc_T_4; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_5_2 = atomics_a_mask_sub_sub_2_2 & atomics_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_5 = atomics_a_mask_sub_size & atomics_a_mask_sub_5_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_5_1 = atomics_a_mask_sub_sub_2_1 | _atomics_a_mask_sub_acc_T_5; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_6_2 = atomics_a_mask_sub_sub_3_2 & atomics_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_6 = atomics_a_mask_sub_size & atomics_a_mask_sub_6_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_6_1 = atomics_a_mask_sub_sub_3_1 | _atomics_a_mask_sub_acc_T_6; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_7_2 = atomics_a_mask_sub_sub_3_2 & atomics_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_7 = atomics_a_mask_sub_size & atomics_a_mask_sub_7_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_7_1 = atomics_a_mask_sub_sub_3_1 | _atomics_a_mask_sub_acc_T_7; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_size = atomics_a_mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_nbit = ~atomics_a_mask_bit; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_eq = atomics_a_mask_sub_0_2 & atomics_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T = atomics_a_mask_size & atomics_a_mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc = atomics_a_mask_sub_0_1 | _atomics_a_mask_acc_T; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_1 = atomics_a_mask_sub_0_2 & atomics_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_1 = atomics_a_mask_size & atomics_a_mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_1 = atomics_a_mask_sub_0_1 | _atomics_a_mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_2 = atomics_a_mask_sub_1_2 & atomics_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_2 = atomics_a_mask_size & atomics_a_mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_2 = atomics_a_mask_sub_1_1 | _atomics_a_mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_3 = atomics_a_mask_sub_1_2 & atomics_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_3 = atomics_a_mask_size & atomics_a_mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_3 = atomics_a_mask_sub_1_1 | _atomics_a_mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_4 = atomics_a_mask_sub_2_2 & atomics_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_4 = atomics_a_mask_size & atomics_a_mask_eq_4; // 
@[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_4 = atomics_a_mask_sub_2_1 | _atomics_a_mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_5 = atomics_a_mask_sub_2_2 & atomics_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_5 = atomics_a_mask_size & atomics_a_mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_5 = atomics_a_mask_sub_2_1 | _atomics_a_mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_6 = atomics_a_mask_sub_3_2 & atomics_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_6 = atomics_a_mask_size & atomics_a_mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_6 = atomics_a_mask_sub_3_1 | _atomics_a_mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_7 = atomics_a_mask_sub_3_2 & atomics_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_7 = atomics_a_mask_size & atomics_a_mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_7 = atomics_a_mask_sub_3_1 | _atomics_a_mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_8 = atomics_a_mask_sub_4_2 & atomics_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_8 = atomics_a_mask_size & atomics_a_mask_eq_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_8 = atomics_a_mask_sub_4_1 | _atomics_a_mask_acc_T_8; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_9 = atomics_a_mask_sub_4_2 & atomics_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_9 = atomics_a_mask_size & atomics_a_mask_eq_9; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_9 = atomics_a_mask_sub_4_1 | _atomics_a_mask_acc_T_9; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_10 = atomics_a_mask_sub_5_2 & atomics_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_10 = atomics_a_mask_size & atomics_a_mask_eq_10; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_10 = atomics_a_mask_sub_5_1 | _atomics_a_mask_acc_T_10; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_11 = atomics_a_mask_sub_5_2 & atomics_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_11 = atomics_a_mask_size & atomics_a_mask_eq_11; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_11 = atomics_a_mask_sub_5_1 | _atomics_a_mask_acc_T_11; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_12 = atomics_a_mask_sub_6_2 & atomics_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_12 = atomics_a_mask_size & atomics_a_mask_eq_12; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_12 = atomics_a_mask_sub_6_1 | _atomics_a_mask_acc_T_12; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_13 = atomics_a_mask_sub_6_2 & atomics_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_13 = atomics_a_mask_size & atomics_a_mask_eq_13; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_13 = atomics_a_mask_sub_6_1 | _atomics_a_mask_acc_T_13; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_14 = atomics_a_mask_sub_7_2 & atomics_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_14 = atomics_a_mask_size & atomics_a_mask_eq_14; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_14 = atomics_a_mask_sub_7_1 | _atomics_a_mask_acc_T_14; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_15 = atomics_a_mask_sub_7_2 & atomics_a_mask_bit; // @[Misc.scala:210:26, 
:214:27] wire _atomics_a_mask_acc_T_15 = atomics_a_mask_size & atomics_a_mask_eq_15; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_15 = atomics_a_mask_sub_7_1 | _atomics_a_mask_acc_T_15; // @[Misc.scala:215:{29,38}] wire [1:0] atomics_a_mask_lo_lo_lo = {atomics_a_mask_acc_1, atomics_a_mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_lo_hi = {atomics_a_mask_acc_3, atomics_a_mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_lo = {atomics_a_mask_lo_lo_hi, atomics_a_mask_lo_lo_lo}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_lo_hi_lo = {atomics_a_mask_acc_5, atomics_a_mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_hi_hi = {atomics_a_mask_acc_7, atomics_a_mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_hi = {atomics_a_mask_lo_hi_hi, atomics_a_mask_lo_hi_lo}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_lo = {atomics_a_mask_lo_hi, atomics_a_mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_lo_lo = {atomics_a_mask_acc_9, atomics_a_mask_acc_8}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_lo_hi = {atomics_a_mask_acc_11, atomics_a_mask_acc_10}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_lo = {atomics_a_mask_hi_lo_hi, atomics_a_mask_hi_lo_lo}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_hi_lo = {atomics_a_mask_acc_13, atomics_a_mask_acc_12}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_hi_hi = {atomics_a_mask_acc_15, atomics_a_mask_acc_14}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_hi = {atomics_a_mask_hi_hi_hi, atomics_a_mask_hi_hi_lo}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_hi = {atomics_a_mask_hi_hi, atomics_a_mask_hi_lo}; // @[Misc.scala:222:10] assign _atomics_a_mask_T = {atomics_a_mask_hi, atomics_a_mask_lo}; // @[Misc.scala:222:10] assign atomics_a_mask = _atomics_a_mask_T; // @[Misc.scala:222:10] wire [40:0] _atomics_legal_T_64 = {1'h0, _atomics_legal_T_63}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_65 = _atomics_legal_T_64 & 41'h9C110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_66 = _atomics_legal_T_65; // @[Parameters.scala:137:46] wire _atomics_legal_T_67 = _atomics_legal_T_66 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_69 = {1'h0, _atomics_legal_T_68}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_70 = _atomics_legal_T_69 & 41'h9E101000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_71 = _atomics_legal_T_70; // @[Parameters.scala:137:46] wire _atomics_legal_T_72 = _atomics_legal_T_71 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_74 = {1'h0, _atomics_legal_T_73}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_75 = _atomics_legal_T_74 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_76 = _atomics_legal_T_75; // @[Parameters.scala:137:46] wire _atomics_legal_T_77 = _atomics_legal_T_76 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_79 = {1'h0, _atomics_legal_T_78}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_80 = _atomics_legal_T_79 & 41'h9C000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_81 = _atomics_legal_T_80; // @[Parameters.scala:137:46] wire _atomics_legal_T_82 = _atomics_legal_T_81 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_84 = {1'h0, _atomics_legal_T_83}; // 
@[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_85 = _atomics_legal_T_84 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_86 = _atomics_legal_T_85; // @[Parameters.scala:137:46] wire _atomics_legal_T_87 = _atomics_legal_T_86 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_88 = _atomics_legal_T_67 | _atomics_legal_T_72; // @[Parameters.scala:685:42] wire _atomics_legal_T_89 = _atomics_legal_T_88 | _atomics_legal_T_77; // @[Parameters.scala:685:42] wire _atomics_legal_T_90 = _atomics_legal_T_89 | _atomics_legal_T_82; // @[Parameters.scala:685:42] wire _atomics_legal_T_91 = _atomics_legal_T_90 | _atomics_legal_T_87; // @[Parameters.scala:685:42] wire _atomics_legal_T_92 = _atomics_legal_T_91; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_116 = _atomics_legal_T_92; // @[Parameters.scala:684:54, :686:26] wire [40:0] _atomics_legal_T_95 = {1'h0, _atomics_legal_T_94}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_96 = _atomics_legal_T_95 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_97 = _atomics_legal_T_96; // @[Parameters.scala:137:46] wire _atomics_legal_T_98 = _atomics_legal_T_97 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_105 = {1'h0, _atomics_legal_T_104}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_106 = _atomics_legal_T_105 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_107 = _atomics_legal_T_106; // @[Parameters.scala:137:46] wire _atomics_legal_T_108 = _atomics_legal_T_107 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_110 = {1'h0, _atomics_legal_T_109}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_111 = _atomics_legal_T_110 & 41'h90000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_112 = _atomics_legal_T_111; // @[Parameters.scala:137:46] wire _atomics_legal_T_113 = _atomics_legal_T_112 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_114 = _atomics_legal_T_108 | _atomics_legal_T_113; // @[Parameters.scala:685:42] wire _atomics_legal_T_115 = _atomics_legal_T_114; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_117 = _atomics_legal_T_116; // @[Parameters.scala:686:26] wire atomics_legal_1 = _atomics_legal_T_117 | _atomics_legal_T_115; // @[Parameters.scala:684:54, :686:26] wire [15:0] _atomics_a_mask_T_1; // @[Misc.scala:222:10] wire [15:0] atomics_a_1_mask; // @[Edges.scala:534:17] wire [1:0] atomics_a_mask_sizeOH_shiftAmount_1 = _atomics_a_mask_sizeOH_T_3[1:0]; // @[OneHot.scala:64:49] wire [3:0] _atomics_a_mask_sizeOH_T_4 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_1; // @[OneHot.scala:64:49, :65:12] wire [3:0] _atomics_a_mask_sizeOH_T_5 = _atomics_a_mask_sizeOH_T_4; // @[OneHot.scala:65:{12,27}] wire [3:0] atomics_a_mask_sizeOH_1 = {_atomics_a_mask_sizeOH_T_5[3:1], 1'h1}; // @[OneHot.scala:65:27] wire atomics_a_mask_sub_sub_sub_size_1 = atomics_a_mask_sizeOH_1[3]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_sub_1_2_1 = atomics_a_mask_sub_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_sub_sub_nbit_1 = ~atomics_a_mask_sub_sub_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_sub_0_2_1 = atomics_a_mask_sub_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_sub_acc_T_2 = atomics_a_mask_sub_sub_sub_size_1 & atomics_a_mask_sub_sub_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire 
atomics_a_mask_sub_sub_sub_0_1_1 = _atomics_a_mask_sub_sub_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire _atomics_a_mask_sub_sub_sub_acc_T_3 = atomics_a_mask_sub_sub_sub_size_1 & atomics_a_mask_sub_sub_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_1_1_1 = _atomics_a_mask_sub_sub_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_size_1 = atomics_a_mask_sizeOH_1[2]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_nbit_1 = ~atomics_a_mask_sub_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_0_2_1 = atomics_a_mask_sub_sub_sub_0_2_1 & atomics_a_mask_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_4 = atomics_a_mask_sub_sub_size_1 & atomics_a_mask_sub_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_0_1_1 = atomics_a_mask_sub_sub_sub_0_1_1 | _atomics_a_mask_sub_sub_acc_T_4; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_1_2_1 = atomics_a_mask_sub_sub_sub_0_2_1 & atomics_a_mask_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_5 = atomics_a_mask_sub_sub_size_1 & atomics_a_mask_sub_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_1_1_1 = atomics_a_mask_sub_sub_sub_0_1_1 | _atomics_a_mask_sub_sub_acc_T_5; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_2_2_1 = atomics_a_mask_sub_sub_sub_1_2_1 & atomics_a_mask_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_6 = atomics_a_mask_sub_sub_size_1 & atomics_a_mask_sub_sub_2_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_2_1_1 = atomics_a_mask_sub_sub_sub_1_1_1 | _atomics_a_mask_sub_sub_acc_T_6; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_3_2_1 = atomics_a_mask_sub_sub_sub_1_2_1 & atomics_a_mask_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_7 = atomics_a_mask_sub_sub_size_1 & atomics_a_mask_sub_sub_3_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_3_1_1 = atomics_a_mask_sub_sub_sub_1_1_1 | _atomics_a_mask_sub_sub_acc_T_7; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_size_1 = atomics_a_mask_sizeOH_1[1]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_nbit_1 = ~atomics_a_mask_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_0_2_1 = atomics_a_mask_sub_sub_0_2_1 & atomics_a_mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_8 = atomics_a_mask_sub_size_1 & atomics_a_mask_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_0_1_1 = atomics_a_mask_sub_sub_0_1_1 | _atomics_a_mask_sub_acc_T_8; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_1_2_1 = atomics_a_mask_sub_sub_0_2_1 & atomics_a_mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_9 = atomics_a_mask_sub_size_1 & atomics_a_mask_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_1_1_1 = atomics_a_mask_sub_sub_0_1_1 | _atomics_a_mask_sub_acc_T_9; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_2_2_1 = atomics_a_mask_sub_sub_1_2_1 & atomics_a_mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_10 = atomics_a_mask_sub_size_1 & atomics_a_mask_sub_2_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_2_1_1 = atomics_a_mask_sub_sub_1_1_1 | _atomics_a_mask_sub_acc_T_10; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_3_2_1 = 
atomics_a_mask_sub_sub_1_2_1 & atomics_a_mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_11 = atomics_a_mask_sub_size_1 & atomics_a_mask_sub_3_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_3_1_1 = atomics_a_mask_sub_sub_1_1_1 | _atomics_a_mask_sub_acc_T_11; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_4_2_1 = atomics_a_mask_sub_sub_2_2_1 & atomics_a_mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_12 = atomics_a_mask_sub_size_1 & atomics_a_mask_sub_4_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_4_1_1 = atomics_a_mask_sub_sub_2_1_1 | _atomics_a_mask_sub_acc_T_12; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_5_2_1 = atomics_a_mask_sub_sub_2_2_1 & atomics_a_mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_13 = atomics_a_mask_sub_size_1 & atomics_a_mask_sub_5_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_5_1_1 = atomics_a_mask_sub_sub_2_1_1 | _atomics_a_mask_sub_acc_T_13; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_6_2_1 = atomics_a_mask_sub_sub_3_2_1 & atomics_a_mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_14 = atomics_a_mask_sub_size_1 & atomics_a_mask_sub_6_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_6_1_1 = atomics_a_mask_sub_sub_3_1_1 | _atomics_a_mask_sub_acc_T_14; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_7_2_1 = atomics_a_mask_sub_sub_3_2_1 & atomics_a_mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_15 = atomics_a_mask_sub_size_1 & atomics_a_mask_sub_7_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_7_1_1 = atomics_a_mask_sub_sub_3_1_1 | _atomics_a_mask_sub_acc_T_15; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_size_1 = atomics_a_mask_sizeOH_1[0]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_nbit_1 = ~atomics_a_mask_bit_1; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_eq_16 = atomics_a_mask_sub_0_2_1 & atomics_a_mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_16 = atomics_a_mask_size_1 & atomics_a_mask_eq_16; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_16 = atomics_a_mask_sub_0_1_1 | _atomics_a_mask_acc_T_16; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_17 = atomics_a_mask_sub_0_2_1 & atomics_a_mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_17 = atomics_a_mask_size_1 & atomics_a_mask_eq_17; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_17 = atomics_a_mask_sub_0_1_1 | _atomics_a_mask_acc_T_17; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_18 = atomics_a_mask_sub_1_2_1 & atomics_a_mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_18 = atomics_a_mask_size_1 & atomics_a_mask_eq_18; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_18 = atomics_a_mask_sub_1_1_1 | _atomics_a_mask_acc_T_18; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_19 = atomics_a_mask_sub_1_2_1 & atomics_a_mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_19 = atomics_a_mask_size_1 & atomics_a_mask_eq_19; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_19 = atomics_a_mask_sub_1_1_1 | _atomics_a_mask_acc_T_19; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_20 = atomics_a_mask_sub_2_2_1 & atomics_a_mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_20 = 
atomics_a_mask_size_1 & atomics_a_mask_eq_20; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_20 = atomics_a_mask_sub_2_1_1 | _atomics_a_mask_acc_T_20; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_21 = atomics_a_mask_sub_2_2_1 & atomics_a_mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_21 = atomics_a_mask_size_1 & atomics_a_mask_eq_21; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_21 = atomics_a_mask_sub_2_1_1 | _atomics_a_mask_acc_T_21; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_22 = atomics_a_mask_sub_3_2_1 & atomics_a_mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_22 = atomics_a_mask_size_1 & atomics_a_mask_eq_22; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_22 = atomics_a_mask_sub_3_1_1 | _atomics_a_mask_acc_T_22; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_23 = atomics_a_mask_sub_3_2_1 & atomics_a_mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_23 = atomics_a_mask_size_1 & atomics_a_mask_eq_23; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_23 = atomics_a_mask_sub_3_1_1 | _atomics_a_mask_acc_T_23; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_24 = atomics_a_mask_sub_4_2_1 & atomics_a_mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_24 = atomics_a_mask_size_1 & atomics_a_mask_eq_24; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_24 = atomics_a_mask_sub_4_1_1 | _atomics_a_mask_acc_T_24; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_25 = atomics_a_mask_sub_4_2_1 & atomics_a_mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_25 = atomics_a_mask_size_1 & atomics_a_mask_eq_25; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_25 = atomics_a_mask_sub_4_1_1 | _atomics_a_mask_acc_T_25; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_26 = atomics_a_mask_sub_5_2_1 & atomics_a_mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_26 = atomics_a_mask_size_1 & atomics_a_mask_eq_26; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_26 = atomics_a_mask_sub_5_1_1 | _atomics_a_mask_acc_T_26; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_27 = atomics_a_mask_sub_5_2_1 & atomics_a_mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_27 = atomics_a_mask_size_1 & atomics_a_mask_eq_27; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_27 = atomics_a_mask_sub_5_1_1 | _atomics_a_mask_acc_T_27; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_28 = atomics_a_mask_sub_6_2_1 & atomics_a_mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_28 = atomics_a_mask_size_1 & atomics_a_mask_eq_28; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_28 = atomics_a_mask_sub_6_1_1 | _atomics_a_mask_acc_T_28; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_29 = atomics_a_mask_sub_6_2_1 & atomics_a_mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_29 = atomics_a_mask_size_1 & atomics_a_mask_eq_29; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_29 = atomics_a_mask_sub_6_1_1 | _atomics_a_mask_acc_T_29; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_30 = atomics_a_mask_sub_7_2_1 & atomics_a_mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_30 = atomics_a_mask_size_1 & atomics_a_mask_eq_30; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_30 = 
atomics_a_mask_sub_7_1_1 | _atomics_a_mask_acc_T_30; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_31 = atomics_a_mask_sub_7_2_1 & atomics_a_mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_31 = atomics_a_mask_size_1 & atomics_a_mask_eq_31; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_31 = atomics_a_mask_sub_7_1_1 | _atomics_a_mask_acc_T_31; // @[Misc.scala:215:{29,38}] wire [1:0] atomics_a_mask_lo_lo_lo_1 = {atomics_a_mask_acc_17, atomics_a_mask_acc_16}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_lo_hi_1 = {atomics_a_mask_acc_19, atomics_a_mask_acc_18}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_lo_1 = {atomics_a_mask_lo_lo_hi_1, atomics_a_mask_lo_lo_lo_1}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_lo_hi_lo_1 = {atomics_a_mask_acc_21, atomics_a_mask_acc_20}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_hi_hi_1 = {atomics_a_mask_acc_23, atomics_a_mask_acc_22}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_hi_1 = {atomics_a_mask_lo_hi_hi_1, atomics_a_mask_lo_hi_lo_1}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_lo_1 = {atomics_a_mask_lo_hi_1, atomics_a_mask_lo_lo_1}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_lo_lo_1 = {atomics_a_mask_acc_25, atomics_a_mask_acc_24}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_lo_hi_1 = {atomics_a_mask_acc_27, atomics_a_mask_acc_26}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_lo_1 = {atomics_a_mask_hi_lo_hi_1, atomics_a_mask_hi_lo_lo_1}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_hi_lo_1 = {atomics_a_mask_acc_29, atomics_a_mask_acc_28}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_hi_hi_1 = {atomics_a_mask_acc_31, atomics_a_mask_acc_30}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_hi_1 = {atomics_a_mask_hi_hi_hi_1, atomics_a_mask_hi_hi_lo_1}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_hi_1 = {atomics_a_mask_hi_hi_1, atomics_a_mask_hi_lo_1}; // @[Misc.scala:222:10] assign _atomics_a_mask_T_1 = {atomics_a_mask_hi_1, atomics_a_mask_lo_1}; // @[Misc.scala:222:10] assign atomics_a_1_mask = _atomics_a_mask_T_1; // @[Misc.scala:222:10] wire [40:0] _atomics_legal_T_123 = {1'h0, _atomics_legal_T_122}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_124 = _atomics_legal_T_123 & 41'h9C110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_125 = _atomics_legal_T_124; // @[Parameters.scala:137:46] wire _atomics_legal_T_126 = _atomics_legal_T_125 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_128 = {1'h0, _atomics_legal_T_127}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_129 = _atomics_legal_T_128 & 41'h9E101000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_130 = _atomics_legal_T_129; // @[Parameters.scala:137:46] wire _atomics_legal_T_131 = _atomics_legal_T_130 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_133 = {1'h0, _atomics_legal_T_132}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_134 = _atomics_legal_T_133 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_135 = _atomics_legal_T_134; // @[Parameters.scala:137:46] wire _atomics_legal_T_136 = _atomics_legal_T_135 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_138 = {1'h0, _atomics_legal_T_137}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_139 = _atomics_legal_T_138 & 
41'h9C000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_140 = _atomics_legal_T_139; // @[Parameters.scala:137:46] wire _atomics_legal_T_141 = _atomics_legal_T_140 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_143 = {1'h0, _atomics_legal_T_142}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_144 = _atomics_legal_T_143 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_145 = _atomics_legal_T_144; // @[Parameters.scala:137:46] wire _atomics_legal_T_146 = _atomics_legal_T_145 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_147 = _atomics_legal_T_126 | _atomics_legal_T_131; // @[Parameters.scala:685:42] wire _atomics_legal_T_148 = _atomics_legal_T_147 | _atomics_legal_T_136; // @[Parameters.scala:685:42] wire _atomics_legal_T_149 = _atomics_legal_T_148 | _atomics_legal_T_141; // @[Parameters.scala:685:42] wire _atomics_legal_T_150 = _atomics_legal_T_149 | _atomics_legal_T_146; // @[Parameters.scala:685:42] wire _atomics_legal_T_151 = _atomics_legal_T_150; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_175 = _atomics_legal_T_151; // @[Parameters.scala:684:54, :686:26] wire [40:0] _atomics_legal_T_154 = {1'h0, _atomics_legal_T_153}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_155 = _atomics_legal_T_154 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_156 = _atomics_legal_T_155; // @[Parameters.scala:137:46] wire _atomics_legal_T_157 = _atomics_legal_T_156 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_164 = {1'h0, _atomics_legal_T_163}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_165 = _atomics_legal_T_164 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_166 = _atomics_legal_T_165; // @[Parameters.scala:137:46] wire _atomics_legal_T_167 = _atomics_legal_T_166 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_169 = {1'h0, _atomics_legal_T_168}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_170 = _atomics_legal_T_169 & 41'h90000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_171 = _atomics_legal_T_170; // @[Parameters.scala:137:46] wire _atomics_legal_T_172 = _atomics_legal_T_171 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_173 = _atomics_legal_T_167 | _atomics_legal_T_172; // @[Parameters.scala:685:42] wire _atomics_legal_T_174 = _atomics_legal_T_173; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_176 = _atomics_legal_T_175; // @[Parameters.scala:686:26] wire atomics_legal_2 = _atomics_legal_T_176 | _atomics_legal_T_174; // @[Parameters.scala:684:54, :686:26] wire [15:0] _atomics_a_mask_T_2; // @[Misc.scala:222:10] wire [15:0] atomics_a_2_mask; // @[Edges.scala:534:17] wire [1:0] atomics_a_mask_sizeOH_shiftAmount_2 = _atomics_a_mask_sizeOH_T_6[1:0]; // @[OneHot.scala:64:49] wire [3:0] _atomics_a_mask_sizeOH_T_7 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_2; // @[OneHot.scala:64:49, :65:12] wire [3:0] _atomics_a_mask_sizeOH_T_8 = _atomics_a_mask_sizeOH_T_7; // @[OneHot.scala:65:{12,27}] wire [3:0] atomics_a_mask_sizeOH_2 = {_atomics_a_mask_sizeOH_T_8[3:1], 1'h1}; // @[OneHot.scala:65:27] wire atomics_a_mask_sub_sub_sub_size_2 = atomics_a_mask_sizeOH_2[3]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_sub_1_2_2 = atomics_a_mask_sub_sub_sub_bit_2; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_sub_sub_nbit_2 = 
~atomics_a_mask_sub_sub_sub_bit_2; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_sub_0_2_2 = atomics_a_mask_sub_sub_sub_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_sub_acc_T_4 = atomics_a_mask_sub_sub_sub_size_2 & atomics_a_mask_sub_sub_sub_0_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_0_1_2 = _atomics_a_mask_sub_sub_sub_acc_T_4; // @[Misc.scala:215:{29,38}] wire _atomics_a_mask_sub_sub_sub_acc_T_5 = atomics_a_mask_sub_sub_sub_size_2 & atomics_a_mask_sub_sub_sub_1_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_1_1_2 = _atomics_a_mask_sub_sub_sub_acc_T_5; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_size_2 = atomics_a_mask_sizeOH_2[2]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_nbit_2 = ~atomics_a_mask_sub_sub_bit_2; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_0_2_2 = atomics_a_mask_sub_sub_sub_0_2_2 & atomics_a_mask_sub_sub_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_8 = atomics_a_mask_sub_sub_size_2 & atomics_a_mask_sub_sub_0_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_0_1_2 = atomics_a_mask_sub_sub_sub_0_1_2 | _atomics_a_mask_sub_sub_acc_T_8; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_1_2_2 = atomics_a_mask_sub_sub_sub_0_2_2 & atomics_a_mask_sub_sub_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_9 = atomics_a_mask_sub_sub_size_2 & atomics_a_mask_sub_sub_1_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_1_1_2 = atomics_a_mask_sub_sub_sub_0_1_2 | _atomics_a_mask_sub_sub_acc_T_9; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_2_2_2 = atomics_a_mask_sub_sub_sub_1_2_2 & atomics_a_mask_sub_sub_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_10 = atomics_a_mask_sub_sub_size_2 & atomics_a_mask_sub_sub_2_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_2_1_2 = atomics_a_mask_sub_sub_sub_1_1_2 | _atomics_a_mask_sub_sub_acc_T_10; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_3_2_2 = atomics_a_mask_sub_sub_sub_1_2_2 & atomics_a_mask_sub_sub_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_11 = atomics_a_mask_sub_sub_size_2 & atomics_a_mask_sub_sub_3_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_3_1_2 = atomics_a_mask_sub_sub_sub_1_1_2 | _atomics_a_mask_sub_sub_acc_T_11; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_size_2 = atomics_a_mask_sizeOH_2[1]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_nbit_2 = ~atomics_a_mask_sub_bit_2; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_0_2_2 = atomics_a_mask_sub_sub_0_2_2 & atomics_a_mask_sub_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_16 = atomics_a_mask_sub_size_2 & atomics_a_mask_sub_0_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_0_1_2 = atomics_a_mask_sub_sub_0_1_2 | _atomics_a_mask_sub_acc_T_16; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_1_2_2 = atomics_a_mask_sub_sub_0_2_2 & atomics_a_mask_sub_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_17 = atomics_a_mask_sub_size_2 & atomics_a_mask_sub_1_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_1_1_2 = atomics_a_mask_sub_sub_0_1_2 | _atomics_a_mask_sub_acc_T_17; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_2_2_2 = atomics_a_mask_sub_sub_1_2_2 & 
atomics_a_mask_sub_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_18 = atomics_a_mask_sub_size_2 & atomics_a_mask_sub_2_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_2_1_2 = atomics_a_mask_sub_sub_1_1_2 | _atomics_a_mask_sub_acc_T_18; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_3_2_2 = atomics_a_mask_sub_sub_1_2_2 & atomics_a_mask_sub_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_19 = atomics_a_mask_sub_size_2 & atomics_a_mask_sub_3_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_3_1_2 = atomics_a_mask_sub_sub_1_1_2 | _atomics_a_mask_sub_acc_T_19; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_4_2_2 = atomics_a_mask_sub_sub_2_2_2 & atomics_a_mask_sub_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_20 = atomics_a_mask_sub_size_2 & atomics_a_mask_sub_4_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_4_1_2 = atomics_a_mask_sub_sub_2_1_2 | _atomics_a_mask_sub_acc_T_20; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_5_2_2 = atomics_a_mask_sub_sub_2_2_2 & atomics_a_mask_sub_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_21 = atomics_a_mask_sub_size_2 & atomics_a_mask_sub_5_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_5_1_2 = atomics_a_mask_sub_sub_2_1_2 | _atomics_a_mask_sub_acc_T_21; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_6_2_2 = atomics_a_mask_sub_sub_3_2_2 & atomics_a_mask_sub_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_22 = atomics_a_mask_sub_size_2 & atomics_a_mask_sub_6_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_6_1_2 = atomics_a_mask_sub_sub_3_1_2 | _atomics_a_mask_sub_acc_T_22; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_7_2_2 = atomics_a_mask_sub_sub_3_2_2 & atomics_a_mask_sub_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_23 = atomics_a_mask_sub_size_2 & atomics_a_mask_sub_7_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_7_1_2 = atomics_a_mask_sub_sub_3_1_2 | _atomics_a_mask_sub_acc_T_23; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_size_2 = atomics_a_mask_sizeOH_2[0]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_nbit_2 = ~atomics_a_mask_bit_2; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_eq_32 = atomics_a_mask_sub_0_2_2 & atomics_a_mask_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_32 = atomics_a_mask_size_2 & atomics_a_mask_eq_32; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_32 = atomics_a_mask_sub_0_1_2 | _atomics_a_mask_acc_T_32; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_33 = atomics_a_mask_sub_0_2_2 & atomics_a_mask_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_33 = atomics_a_mask_size_2 & atomics_a_mask_eq_33; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_33 = atomics_a_mask_sub_0_1_2 | _atomics_a_mask_acc_T_33; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_34 = atomics_a_mask_sub_1_2_2 & atomics_a_mask_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_34 = atomics_a_mask_size_2 & atomics_a_mask_eq_34; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_34 = atomics_a_mask_sub_1_1_2 | _atomics_a_mask_acc_T_34; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_35 = atomics_a_mask_sub_1_2_2 & atomics_a_mask_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_35 = 
atomics_a_mask_size_2 & atomics_a_mask_eq_35; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_35 = atomics_a_mask_sub_1_1_2 | _atomics_a_mask_acc_T_35; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_36 = atomics_a_mask_sub_2_2_2 & atomics_a_mask_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_36 = atomics_a_mask_size_2 & atomics_a_mask_eq_36; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_36 = atomics_a_mask_sub_2_1_2 | _atomics_a_mask_acc_T_36; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_37 = atomics_a_mask_sub_2_2_2 & atomics_a_mask_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_37 = atomics_a_mask_size_2 & atomics_a_mask_eq_37; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_37 = atomics_a_mask_sub_2_1_2 | _atomics_a_mask_acc_T_37; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_38 = atomics_a_mask_sub_3_2_2 & atomics_a_mask_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_38 = atomics_a_mask_size_2 & atomics_a_mask_eq_38; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_38 = atomics_a_mask_sub_3_1_2 | _atomics_a_mask_acc_T_38; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_39 = atomics_a_mask_sub_3_2_2 & atomics_a_mask_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_39 = atomics_a_mask_size_2 & atomics_a_mask_eq_39; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_39 = atomics_a_mask_sub_3_1_2 | _atomics_a_mask_acc_T_39; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_40 = atomics_a_mask_sub_4_2_2 & atomics_a_mask_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_40 = atomics_a_mask_size_2 & atomics_a_mask_eq_40; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_40 = atomics_a_mask_sub_4_1_2 | _atomics_a_mask_acc_T_40; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_41 = atomics_a_mask_sub_4_2_2 & atomics_a_mask_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_41 = atomics_a_mask_size_2 & atomics_a_mask_eq_41; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_41 = atomics_a_mask_sub_4_1_2 | _atomics_a_mask_acc_T_41; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_42 = atomics_a_mask_sub_5_2_2 & atomics_a_mask_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_42 = atomics_a_mask_size_2 & atomics_a_mask_eq_42; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_42 = atomics_a_mask_sub_5_1_2 | _atomics_a_mask_acc_T_42; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_43 = atomics_a_mask_sub_5_2_2 & atomics_a_mask_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_43 = atomics_a_mask_size_2 & atomics_a_mask_eq_43; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_43 = atomics_a_mask_sub_5_1_2 | _atomics_a_mask_acc_T_43; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_44 = atomics_a_mask_sub_6_2_2 & atomics_a_mask_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_44 = atomics_a_mask_size_2 & atomics_a_mask_eq_44; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_44 = atomics_a_mask_sub_6_1_2 | _atomics_a_mask_acc_T_44; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_45 = atomics_a_mask_sub_6_2_2 & atomics_a_mask_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_45 = atomics_a_mask_size_2 & atomics_a_mask_eq_45; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_45 = 
atomics_a_mask_sub_6_1_2 | _atomics_a_mask_acc_T_45; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_46 = atomics_a_mask_sub_7_2_2 & atomics_a_mask_nbit_2; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_46 = atomics_a_mask_size_2 & atomics_a_mask_eq_46; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_46 = atomics_a_mask_sub_7_1_2 | _atomics_a_mask_acc_T_46; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_47 = atomics_a_mask_sub_7_2_2 & atomics_a_mask_bit_2; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_47 = atomics_a_mask_size_2 & atomics_a_mask_eq_47; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_47 = atomics_a_mask_sub_7_1_2 | _atomics_a_mask_acc_T_47; // @[Misc.scala:215:{29,38}] wire [1:0] atomics_a_mask_lo_lo_lo_2 = {atomics_a_mask_acc_33, atomics_a_mask_acc_32}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_lo_hi_2 = {atomics_a_mask_acc_35, atomics_a_mask_acc_34}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_lo_2 = {atomics_a_mask_lo_lo_hi_2, atomics_a_mask_lo_lo_lo_2}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_lo_hi_lo_2 = {atomics_a_mask_acc_37, atomics_a_mask_acc_36}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_hi_hi_2 = {atomics_a_mask_acc_39, atomics_a_mask_acc_38}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_hi_2 = {atomics_a_mask_lo_hi_hi_2, atomics_a_mask_lo_hi_lo_2}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_lo_2 = {atomics_a_mask_lo_hi_2, atomics_a_mask_lo_lo_2}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_lo_lo_2 = {atomics_a_mask_acc_41, atomics_a_mask_acc_40}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_lo_hi_2 = {atomics_a_mask_acc_43, atomics_a_mask_acc_42}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_lo_2 = {atomics_a_mask_hi_lo_hi_2, atomics_a_mask_hi_lo_lo_2}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_hi_lo_2 = {atomics_a_mask_acc_45, atomics_a_mask_acc_44}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_hi_hi_2 = {atomics_a_mask_acc_47, atomics_a_mask_acc_46}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_hi_2 = {atomics_a_mask_hi_hi_hi_2, atomics_a_mask_hi_hi_lo_2}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_hi_2 = {atomics_a_mask_hi_hi_2, atomics_a_mask_hi_lo_2}; // @[Misc.scala:222:10] assign _atomics_a_mask_T_2 = {atomics_a_mask_hi_2, atomics_a_mask_lo_2}; // @[Misc.scala:222:10] assign atomics_a_2_mask = _atomics_a_mask_T_2; // @[Misc.scala:222:10] wire [40:0] _atomics_legal_T_182 = {1'h0, _atomics_legal_T_181}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_183 = _atomics_legal_T_182 & 41'h9C110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_184 = _atomics_legal_T_183; // @[Parameters.scala:137:46] wire _atomics_legal_T_185 = _atomics_legal_T_184 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_187 = {1'h0, _atomics_legal_T_186}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_188 = _atomics_legal_T_187 & 41'h9E101000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_189 = _atomics_legal_T_188; // @[Parameters.scala:137:46] wire _atomics_legal_T_190 = _atomics_legal_T_189 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_192 = {1'h0, _atomics_legal_T_191}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_193 = _atomics_legal_T_192 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] 
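// [annotation] The same two structures repeat once per atomic request in this region
// (suffixes _1 through _4): a byte-mask tree (atomics_a_mask_*) and an address-legality
// chain (atomics_legal_*). Each legality chain masks its address term with the same set
// of constants (41'h9C110000, 41'h9E101000, 41'h9E111000, 41'h9C000000, 41'h9E110000,
// 41'h90000000), presumably the same TileLink address map checked for every request.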
wire [40:0] _atomics_legal_T_194 = _atomics_legal_T_193; // @[Parameters.scala:137:46] wire _atomics_legal_T_195 = _atomics_legal_T_194 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_197 = {1'h0, _atomics_legal_T_196}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_198 = _atomics_legal_T_197 & 41'h9C000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_199 = _atomics_legal_T_198; // @[Parameters.scala:137:46] wire _atomics_legal_T_200 = _atomics_legal_T_199 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_202 = {1'h0, _atomics_legal_T_201}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_203 = _atomics_legal_T_202 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_204 = _atomics_legal_T_203; // @[Parameters.scala:137:46] wire _atomics_legal_T_205 = _atomics_legal_T_204 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_206 = _atomics_legal_T_185 | _atomics_legal_T_190; // @[Parameters.scala:685:42] wire _atomics_legal_T_207 = _atomics_legal_T_206 | _atomics_legal_T_195; // @[Parameters.scala:685:42] wire _atomics_legal_T_208 = _atomics_legal_T_207 | _atomics_legal_T_200; // @[Parameters.scala:685:42] wire _atomics_legal_T_209 = _atomics_legal_T_208 | _atomics_legal_T_205; // @[Parameters.scala:685:42] wire _atomics_legal_T_210 = _atomics_legal_T_209; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_234 = _atomics_legal_T_210; // @[Parameters.scala:684:54, :686:26] wire [40:0] _atomics_legal_T_213 = {1'h0, _atomics_legal_T_212}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_214 = _atomics_legal_T_213 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_215 = _atomics_legal_T_214; // @[Parameters.scala:137:46] wire _atomics_legal_T_216 = _atomics_legal_T_215 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_223 = {1'h0, _atomics_legal_T_222}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_224 = _atomics_legal_T_223 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_225 = _atomics_legal_T_224; // @[Parameters.scala:137:46] wire _atomics_legal_T_226 = _atomics_legal_T_225 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_228 = {1'h0, _atomics_legal_T_227}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_229 = _atomics_legal_T_228 & 41'h90000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_230 = _atomics_legal_T_229; // @[Parameters.scala:137:46] wire _atomics_legal_T_231 = _atomics_legal_T_230 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_232 = _atomics_legal_T_226 | _atomics_legal_T_231; // @[Parameters.scala:685:42] wire _atomics_legal_T_233 = _atomics_legal_T_232; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_235 = _atomics_legal_T_234; // @[Parameters.scala:686:26] wire atomics_legal_3 = _atomics_legal_T_235 | _atomics_legal_T_233; // @[Parameters.scala:684:54, :686:26] wire [15:0] _atomics_a_mask_T_3; // @[Misc.scala:222:10] wire [15:0] atomics_a_3_mask; // @[Edges.scala:534:17] wire [1:0] atomics_a_mask_sizeOH_shiftAmount_3 = _atomics_a_mask_sizeOH_T_9[1:0]; // @[OneHot.scala:64:49] wire [3:0] _atomics_a_mask_sizeOH_T_10 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_3; // @[OneHot.scala:64:49, :65:12] wire [3:0] _atomics_a_mask_sizeOH_T_11 = _atomics_a_mask_sizeOH_T_10; // @[OneHot.scala:65:{12,27}] wire [3:0] 
atomics_a_mask_sizeOH_3 = {_atomics_a_mask_sizeOH_T_11[3:1], 1'h1}; // @[OneHot.scala:65:27] wire atomics_a_mask_sub_sub_sub_size_3 = atomics_a_mask_sizeOH_3[3]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_sub_1_2_3 = atomics_a_mask_sub_sub_sub_bit_3; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_sub_sub_nbit_3 = ~atomics_a_mask_sub_sub_sub_bit_3; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_sub_0_2_3 = atomics_a_mask_sub_sub_sub_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_sub_acc_T_6 = atomics_a_mask_sub_sub_sub_size_3 & atomics_a_mask_sub_sub_sub_0_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_0_1_3 = _atomics_a_mask_sub_sub_sub_acc_T_6; // @[Misc.scala:215:{29,38}] wire _atomics_a_mask_sub_sub_sub_acc_T_7 = atomics_a_mask_sub_sub_sub_size_3 & atomics_a_mask_sub_sub_sub_1_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_1_1_3 = _atomics_a_mask_sub_sub_sub_acc_T_7; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_size_3 = atomics_a_mask_sizeOH_3[2]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_nbit_3 = ~atomics_a_mask_sub_sub_bit_3; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_0_2_3 = atomics_a_mask_sub_sub_sub_0_2_3 & atomics_a_mask_sub_sub_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_12 = atomics_a_mask_sub_sub_size_3 & atomics_a_mask_sub_sub_0_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_0_1_3 = atomics_a_mask_sub_sub_sub_0_1_3 | _atomics_a_mask_sub_sub_acc_T_12; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_1_2_3 = atomics_a_mask_sub_sub_sub_0_2_3 & atomics_a_mask_sub_sub_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_13 = atomics_a_mask_sub_sub_size_3 & atomics_a_mask_sub_sub_1_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_1_1_3 = atomics_a_mask_sub_sub_sub_0_1_3 | _atomics_a_mask_sub_sub_acc_T_13; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_2_2_3 = atomics_a_mask_sub_sub_sub_1_2_3 & atomics_a_mask_sub_sub_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_14 = atomics_a_mask_sub_sub_size_3 & atomics_a_mask_sub_sub_2_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_2_1_3 = atomics_a_mask_sub_sub_sub_1_1_3 | _atomics_a_mask_sub_sub_acc_T_14; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_3_2_3 = atomics_a_mask_sub_sub_sub_1_2_3 & atomics_a_mask_sub_sub_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_15 = atomics_a_mask_sub_sub_size_3 & atomics_a_mask_sub_sub_3_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_3_1_3 = atomics_a_mask_sub_sub_sub_1_1_3 | _atomics_a_mask_sub_sub_acc_T_15; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_size_3 = atomics_a_mask_sizeOH_3[1]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_nbit_3 = ~atomics_a_mask_sub_bit_3; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_0_2_3 = atomics_a_mask_sub_sub_0_2_3 & atomics_a_mask_sub_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_24 = atomics_a_mask_sub_size_3 & atomics_a_mask_sub_0_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_0_1_3 = atomics_a_mask_sub_sub_0_1_3 | _atomics_a_mask_sub_acc_T_24; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_1_2_3 = atomics_a_mask_sub_sub_0_2_3 & atomics_a_mask_sub_bit_3; // 
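// [annotation] This appears to be the byte-mask generation tree (cf. the Misc.scala
// locators): each level ANDs one bit of the size one-hot (atomics_a_mask_sizeOH_3[k])
// with an address-bit select gated by the parent level's coverage, and ORs the product
// into the enables inherited from the level above, yielding one enable per byte lane.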
@[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_25 = atomics_a_mask_sub_size_3 & atomics_a_mask_sub_1_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_1_1_3 = atomics_a_mask_sub_sub_0_1_3 | _atomics_a_mask_sub_acc_T_25; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_2_2_3 = atomics_a_mask_sub_sub_1_2_3 & atomics_a_mask_sub_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_26 = atomics_a_mask_sub_size_3 & atomics_a_mask_sub_2_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_2_1_3 = atomics_a_mask_sub_sub_1_1_3 | _atomics_a_mask_sub_acc_T_26; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_3_2_3 = atomics_a_mask_sub_sub_1_2_3 & atomics_a_mask_sub_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_27 = atomics_a_mask_sub_size_3 & atomics_a_mask_sub_3_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_3_1_3 = atomics_a_mask_sub_sub_1_1_3 | _atomics_a_mask_sub_acc_T_27; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_4_2_3 = atomics_a_mask_sub_sub_2_2_3 & atomics_a_mask_sub_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_28 = atomics_a_mask_sub_size_3 & atomics_a_mask_sub_4_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_4_1_3 = atomics_a_mask_sub_sub_2_1_3 | _atomics_a_mask_sub_acc_T_28; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_5_2_3 = atomics_a_mask_sub_sub_2_2_3 & atomics_a_mask_sub_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_29 = atomics_a_mask_sub_size_3 & atomics_a_mask_sub_5_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_5_1_3 = atomics_a_mask_sub_sub_2_1_3 | _atomics_a_mask_sub_acc_T_29; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_6_2_3 = atomics_a_mask_sub_sub_3_2_3 & atomics_a_mask_sub_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_30 = atomics_a_mask_sub_size_3 & atomics_a_mask_sub_6_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_6_1_3 = atomics_a_mask_sub_sub_3_1_3 | _atomics_a_mask_sub_acc_T_30; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_7_2_3 = atomics_a_mask_sub_sub_3_2_3 & atomics_a_mask_sub_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_31 = atomics_a_mask_sub_size_3 & atomics_a_mask_sub_7_2_3; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_7_1_3 = atomics_a_mask_sub_sub_3_1_3 | _atomics_a_mask_sub_acc_T_31; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_size_3 = atomics_a_mask_sizeOH_3[0]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_nbit_3 = ~atomics_a_mask_bit_3; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_eq_48 = atomics_a_mask_sub_0_2_3 & atomics_a_mask_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_48 = atomics_a_mask_size_3 & atomics_a_mask_eq_48; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_48 = atomics_a_mask_sub_0_1_3 | _atomics_a_mask_acc_T_48; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_49 = atomics_a_mask_sub_0_2_3 & atomics_a_mask_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_49 = atomics_a_mask_size_3 & atomics_a_mask_eq_49; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_49 = atomics_a_mask_sub_0_1_3 | _atomics_a_mask_acc_T_49; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_50 = atomics_a_mask_sub_1_2_3 & atomics_a_mask_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_50 = 
atomics_a_mask_size_3 & atomics_a_mask_eq_50; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_50 = atomics_a_mask_sub_1_1_3 | _atomics_a_mask_acc_T_50; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_51 = atomics_a_mask_sub_1_2_3 & atomics_a_mask_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_51 = atomics_a_mask_size_3 & atomics_a_mask_eq_51; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_51 = atomics_a_mask_sub_1_1_3 | _atomics_a_mask_acc_T_51; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_52 = atomics_a_mask_sub_2_2_3 & atomics_a_mask_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_52 = atomics_a_mask_size_3 & atomics_a_mask_eq_52; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_52 = atomics_a_mask_sub_2_1_3 | _atomics_a_mask_acc_T_52; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_53 = atomics_a_mask_sub_2_2_3 & atomics_a_mask_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_53 = atomics_a_mask_size_3 & atomics_a_mask_eq_53; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_53 = atomics_a_mask_sub_2_1_3 | _atomics_a_mask_acc_T_53; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_54 = atomics_a_mask_sub_3_2_3 & atomics_a_mask_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_54 = atomics_a_mask_size_3 & atomics_a_mask_eq_54; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_54 = atomics_a_mask_sub_3_1_3 | _atomics_a_mask_acc_T_54; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_55 = atomics_a_mask_sub_3_2_3 & atomics_a_mask_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_55 = atomics_a_mask_size_3 & atomics_a_mask_eq_55; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_55 = atomics_a_mask_sub_3_1_3 | _atomics_a_mask_acc_T_55; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_56 = atomics_a_mask_sub_4_2_3 & atomics_a_mask_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_56 = atomics_a_mask_size_3 & atomics_a_mask_eq_56; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_56 = atomics_a_mask_sub_4_1_3 | _atomics_a_mask_acc_T_56; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_57 = atomics_a_mask_sub_4_2_3 & atomics_a_mask_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_57 = atomics_a_mask_size_3 & atomics_a_mask_eq_57; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_57 = atomics_a_mask_sub_4_1_3 | _atomics_a_mask_acc_T_57; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_58 = atomics_a_mask_sub_5_2_3 & atomics_a_mask_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_58 = atomics_a_mask_size_3 & atomics_a_mask_eq_58; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_58 = atomics_a_mask_sub_5_1_3 | _atomics_a_mask_acc_T_58; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_59 = atomics_a_mask_sub_5_2_3 & atomics_a_mask_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_59 = atomics_a_mask_size_3 & atomics_a_mask_eq_59; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_59 = atomics_a_mask_sub_5_1_3 | _atomics_a_mask_acc_T_59; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_60 = atomics_a_mask_sub_6_2_3 & atomics_a_mask_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_60 = atomics_a_mask_size_3 & atomics_a_mask_eq_60; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_60 = 
atomics_a_mask_sub_6_1_3 | _atomics_a_mask_acc_T_60; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_61 = atomics_a_mask_sub_6_2_3 & atomics_a_mask_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_61 = atomics_a_mask_size_3 & atomics_a_mask_eq_61; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_61 = atomics_a_mask_sub_6_1_3 | _atomics_a_mask_acc_T_61; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_62 = atomics_a_mask_sub_7_2_3 & atomics_a_mask_nbit_3; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_62 = atomics_a_mask_size_3 & atomics_a_mask_eq_62; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_62 = atomics_a_mask_sub_7_1_3 | _atomics_a_mask_acc_T_62; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_63 = atomics_a_mask_sub_7_2_3 & atomics_a_mask_bit_3; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_63 = atomics_a_mask_size_3 & atomics_a_mask_eq_63; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_63 = atomics_a_mask_sub_7_1_3 | _atomics_a_mask_acc_T_63; // @[Misc.scala:215:{29,38}] wire [1:0] atomics_a_mask_lo_lo_lo_3 = {atomics_a_mask_acc_49, atomics_a_mask_acc_48}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_lo_hi_3 = {atomics_a_mask_acc_51, atomics_a_mask_acc_50}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_lo_3 = {atomics_a_mask_lo_lo_hi_3, atomics_a_mask_lo_lo_lo_3}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_lo_hi_lo_3 = {atomics_a_mask_acc_53, atomics_a_mask_acc_52}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_hi_hi_3 = {atomics_a_mask_acc_55, atomics_a_mask_acc_54}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_hi_3 = {atomics_a_mask_lo_hi_hi_3, atomics_a_mask_lo_hi_lo_3}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_lo_3 = {atomics_a_mask_lo_hi_3, atomics_a_mask_lo_lo_3}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_lo_lo_3 = {atomics_a_mask_acc_57, atomics_a_mask_acc_56}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_lo_hi_3 = {atomics_a_mask_acc_59, atomics_a_mask_acc_58}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_lo_3 = {atomics_a_mask_hi_lo_hi_3, atomics_a_mask_hi_lo_lo_3}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_hi_lo_3 = {atomics_a_mask_acc_61, atomics_a_mask_acc_60}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_hi_hi_3 = {atomics_a_mask_acc_63, atomics_a_mask_acc_62}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_hi_3 = {atomics_a_mask_hi_hi_hi_3, atomics_a_mask_hi_hi_lo_3}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_hi_3 = {atomics_a_mask_hi_hi_3, atomics_a_mask_hi_lo_3}; // @[Misc.scala:222:10] assign _atomics_a_mask_T_3 = {atomics_a_mask_hi_3, atomics_a_mask_lo_3}; // @[Misc.scala:222:10] assign atomics_a_3_mask = _atomics_a_mask_T_3; // @[Misc.scala:222:10] wire [40:0] _atomics_legal_T_241 = {1'h0, _atomics_legal_T_240}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_242 = _atomics_legal_T_241 & 41'h9C110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_243 = _atomics_legal_T_242; // @[Parameters.scala:137:46] wire _atomics_legal_T_244 = _atomics_legal_T_243 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_246 = {1'h0, _atomics_legal_T_245}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_247 = _atomics_legal_T_246 & 41'h9E101000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_248 = 
_atomics_legal_T_247; // @[Parameters.scala:137:46] wire _atomics_legal_T_249 = _atomics_legal_T_248 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_251 = {1'h0, _atomics_legal_T_250}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_252 = _atomics_legal_T_251 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_253 = _atomics_legal_T_252; // @[Parameters.scala:137:46] wire _atomics_legal_T_254 = _atomics_legal_T_253 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_256 = {1'h0, _atomics_legal_T_255}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_257 = _atomics_legal_T_256 & 41'h9C000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_258 = _atomics_legal_T_257; // @[Parameters.scala:137:46] wire _atomics_legal_T_259 = _atomics_legal_T_258 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_261 = {1'h0, _atomics_legal_T_260}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_262 = _atomics_legal_T_261 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_263 = _atomics_legal_T_262; // @[Parameters.scala:137:46] wire _atomics_legal_T_264 = _atomics_legal_T_263 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_265 = _atomics_legal_T_244 | _atomics_legal_T_249; // @[Parameters.scala:685:42] wire _atomics_legal_T_266 = _atomics_legal_T_265 | _atomics_legal_T_254; // @[Parameters.scala:685:42] wire _atomics_legal_T_267 = _atomics_legal_T_266 | _atomics_legal_T_259; // @[Parameters.scala:685:42] wire _atomics_legal_T_268 = _atomics_legal_T_267 | _atomics_legal_T_264; // @[Parameters.scala:685:42] wire _atomics_legal_T_269 = _atomics_legal_T_268; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_293 = _atomics_legal_T_269; // @[Parameters.scala:684:54, :686:26] wire [40:0] _atomics_legal_T_272 = {1'h0, _atomics_legal_T_271}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_273 = _atomics_legal_T_272 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_274 = _atomics_legal_T_273; // @[Parameters.scala:137:46] wire _atomics_legal_T_275 = _atomics_legal_T_274 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_282 = {1'h0, _atomics_legal_T_281}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_283 = _atomics_legal_T_282 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_284 = _atomics_legal_T_283; // @[Parameters.scala:137:46] wire _atomics_legal_T_285 = _atomics_legal_T_284 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_287 = {1'h0, _atomics_legal_T_286}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_288 = _atomics_legal_T_287 & 41'h90000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_289 = _atomics_legal_T_288; // @[Parameters.scala:137:46] wire _atomics_legal_T_290 = _atomics_legal_T_289 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_291 = _atomics_legal_T_285 | _atomics_legal_T_290; // @[Parameters.scala:685:42] wire _atomics_legal_T_292 = _atomics_legal_T_291; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_294 = _atomics_legal_T_293; // @[Parameters.scala:686:26] wire atomics_legal_4 = _atomics_legal_T_294 | _atomics_legal_T_292; // @[Parameters.scala:684:54, :686:26] wire [15:0] _atomics_a_mask_T_4; // @[Misc.scala:222:10] wire [15:0] atomics_a_4_mask; // @[Edges.scala:517:17] 
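// [annotation] The block below rebuilds the same byte-mask tree for atomics_a_4.
// Unlike atomics_a_1/2/3_mask above (tagged Edges.scala:534:17), atomics_a_4_mask is
// tagged Edges.scala:517:17, suggesting it was produced by a different edge helper in
// the generated design.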
wire [1:0] atomics_a_mask_sizeOH_shiftAmount_4 = _atomics_a_mask_sizeOH_T_12[1:0]; // @[OneHot.scala:64:49] wire [3:0] _atomics_a_mask_sizeOH_T_13 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_4; // @[OneHot.scala:64:49, :65:12] wire [3:0] _atomics_a_mask_sizeOH_T_14 = _atomics_a_mask_sizeOH_T_13; // @[OneHot.scala:65:{12,27}] wire [3:0] atomics_a_mask_sizeOH_4 = {_atomics_a_mask_sizeOH_T_14[3:1], 1'h1}; // @[OneHot.scala:65:27] wire atomics_a_mask_sub_sub_sub_size_4 = atomics_a_mask_sizeOH_4[3]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_sub_1_2_4 = atomics_a_mask_sub_sub_sub_bit_4; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_sub_sub_nbit_4 = ~atomics_a_mask_sub_sub_sub_bit_4; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_sub_0_2_4 = atomics_a_mask_sub_sub_sub_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_sub_acc_T_8 = atomics_a_mask_sub_sub_sub_size_4 & atomics_a_mask_sub_sub_sub_0_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_0_1_4 = _atomics_a_mask_sub_sub_sub_acc_T_8; // @[Misc.scala:215:{29,38}] wire _atomics_a_mask_sub_sub_sub_acc_T_9 = atomics_a_mask_sub_sub_sub_size_4 & atomics_a_mask_sub_sub_sub_1_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_1_1_4 = _atomics_a_mask_sub_sub_sub_acc_T_9; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_size_4 = atomics_a_mask_sizeOH_4[2]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_nbit_4 = ~atomics_a_mask_sub_sub_bit_4; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_0_2_4 = atomics_a_mask_sub_sub_sub_0_2_4 & atomics_a_mask_sub_sub_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_16 = atomics_a_mask_sub_sub_size_4 & atomics_a_mask_sub_sub_0_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_0_1_4 = atomics_a_mask_sub_sub_sub_0_1_4 | _atomics_a_mask_sub_sub_acc_T_16; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_1_2_4 = atomics_a_mask_sub_sub_sub_0_2_4 & atomics_a_mask_sub_sub_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_17 = atomics_a_mask_sub_sub_size_4 & atomics_a_mask_sub_sub_1_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_1_1_4 = atomics_a_mask_sub_sub_sub_0_1_4 | _atomics_a_mask_sub_sub_acc_T_17; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_2_2_4 = atomics_a_mask_sub_sub_sub_1_2_4 & atomics_a_mask_sub_sub_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_18 = atomics_a_mask_sub_sub_size_4 & atomics_a_mask_sub_sub_2_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_2_1_4 = atomics_a_mask_sub_sub_sub_1_1_4 | _atomics_a_mask_sub_sub_acc_T_18; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_3_2_4 = atomics_a_mask_sub_sub_sub_1_2_4 & atomics_a_mask_sub_sub_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_19 = atomics_a_mask_sub_sub_size_4 & atomics_a_mask_sub_sub_3_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_3_1_4 = atomics_a_mask_sub_sub_sub_1_1_4 | _atomics_a_mask_sub_sub_acc_T_19; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_size_4 = atomics_a_mask_sizeOH_4[1]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_nbit_4 = ~atomics_a_mask_sub_bit_4; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_0_2_4 = atomics_a_mask_sub_sub_0_2_4 & atomics_a_mask_sub_nbit_4; // @[Misc.scala:211:20, :214:27] wire 
_atomics_a_mask_sub_acc_T_32 = atomics_a_mask_sub_size_4 & atomics_a_mask_sub_0_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_0_1_4 = atomics_a_mask_sub_sub_0_1_4 | _atomics_a_mask_sub_acc_T_32; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_1_2_4 = atomics_a_mask_sub_sub_0_2_4 & atomics_a_mask_sub_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_33 = atomics_a_mask_sub_size_4 & atomics_a_mask_sub_1_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_1_1_4 = atomics_a_mask_sub_sub_0_1_4 | _atomics_a_mask_sub_acc_T_33; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_2_2_4 = atomics_a_mask_sub_sub_1_2_4 & atomics_a_mask_sub_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_34 = atomics_a_mask_sub_size_4 & atomics_a_mask_sub_2_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_2_1_4 = atomics_a_mask_sub_sub_1_1_4 | _atomics_a_mask_sub_acc_T_34; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_3_2_4 = atomics_a_mask_sub_sub_1_2_4 & atomics_a_mask_sub_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_35 = atomics_a_mask_sub_size_4 & atomics_a_mask_sub_3_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_3_1_4 = atomics_a_mask_sub_sub_1_1_4 | _atomics_a_mask_sub_acc_T_35; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_4_2_4 = atomics_a_mask_sub_sub_2_2_4 & atomics_a_mask_sub_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_36 = atomics_a_mask_sub_size_4 & atomics_a_mask_sub_4_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_4_1_4 = atomics_a_mask_sub_sub_2_1_4 | _atomics_a_mask_sub_acc_T_36; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_5_2_4 = atomics_a_mask_sub_sub_2_2_4 & atomics_a_mask_sub_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_37 = atomics_a_mask_sub_size_4 & atomics_a_mask_sub_5_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_5_1_4 = atomics_a_mask_sub_sub_2_1_4 | _atomics_a_mask_sub_acc_T_37; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_6_2_4 = atomics_a_mask_sub_sub_3_2_4 & atomics_a_mask_sub_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_38 = atomics_a_mask_sub_size_4 & atomics_a_mask_sub_6_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_6_1_4 = atomics_a_mask_sub_sub_3_1_4 | _atomics_a_mask_sub_acc_T_38; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_7_2_4 = atomics_a_mask_sub_sub_3_2_4 & atomics_a_mask_sub_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_39 = atomics_a_mask_sub_size_4 & atomics_a_mask_sub_7_2_4; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_7_1_4 = atomics_a_mask_sub_sub_3_1_4 | _atomics_a_mask_sub_acc_T_39; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_size_4 = atomics_a_mask_sizeOH_4[0]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_nbit_4 = ~atomics_a_mask_bit_4; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_eq_64 = atomics_a_mask_sub_0_2_4 & atomics_a_mask_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_64 = atomics_a_mask_size_4 & atomics_a_mask_eq_64; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_64 = atomics_a_mask_sub_0_1_4 | _atomics_a_mask_acc_T_64; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_65 = atomics_a_mask_sub_0_2_4 & atomics_a_mask_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_65 = 
atomics_a_mask_size_4 & atomics_a_mask_eq_65; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_65 = atomics_a_mask_sub_0_1_4 | _atomics_a_mask_acc_T_65; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_66 = atomics_a_mask_sub_1_2_4 & atomics_a_mask_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_66 = atomics_a_mask_size_4 & atomics_a_mask_eq_66; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_66 = atomics_a_mask_sub_1_1_4 | _atomics_a_mask_acc_T_66; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_67 = atomics_a_mask_sub_1_2_4 & atomics_a_mask_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_67 = atomics_a_mask_size_4 & atomics_a_mask_eq_67; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_67 = atomics_a_mask_sub_1_1_4 | _atomics_a_mask_acc_T_67; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_68 = atomics_a_mask_sub_2_2_4 & atomics_a_mask_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_68 = atomics_a_mask_size_4 & atomics_a_mask_eq_68; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_68 = atomics_a_mask_sub_2_1_4 | _atomics_a_mask_acc_T_68; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_69 = atomics_a_mask_sub_2_2_4 & atomics_a_mask_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_69 = atomics_a_mask_size_4 & atomics_a_mask_eq_69; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_69 = atomics_a_mask_sub_2_1_4 | _atomics_a_mask_acc_T_69; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_70 = atomics_a_mask_sub_3_2_4 & atomics_a_mask_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_70 = atomics_a_mask_size_4 & atomics_a_mask_eq_70; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_70 = atomics_a_mask_sub_3_1_4 | _atomics_a_mask_acc_T_70; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_71 = atomics_a_mask_sub_3_2_4 & atomics_a_mask_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_71 = atomics_a_mask_size_4 & atomics_a_mask_eq_71; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_71 = atomics_a_mask_sub_3_1_4 | _atomics_a_mask_acc_T_71; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_72 = atomics_a_mask_sub_4_2_4 & atomics_a_mask_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_72 = atomics_a_mask_size_4 & atomics_a_mask_eq_72; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_72 = atomics_a_mask_sub_4_1_4 | _atomics_a_mask_acc_T_72; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_73 = atomics_a_mask_sub_4_2_4 & atomics_a_mask_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_73 = atomics_a_mask_size_4 & atomics_a_mask_eq_73; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_73 = atomics_a_mask_sub_4_1_4 | _atomics_a_mask_acc_T_73; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_74 = atomics_a_mask_sub_5_2_4 & atomics_a_mask_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_74 = atomics_a_mask_size_4 & atomics_a_mask_eq_74; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_74 = atomics_a_mask_sub_5_1_4 | _atomics_a_mask_acc_T_74; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_75 = atomics_a_mask_sub_5_2_4 & atomics_a_mask_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_75 = atomics_a_mask_size_4 & atomics_a_mask_eq_75; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_75 = 
atomics_a_mask_sub_5_1_4 | _atomics_a_mask_acc_T_75; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_76 = atomics_a_mask_sub_6_2_4 & atomics_a_mask_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_76 = atomics_a_mask_size_4 & atomics_a_mask_eq_76; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_76 = atomics_a_mask_sub_6_1_4 | _atomics_a_mask_acc_T_76; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_77 = atomics_a_mask_sub_6_2_4 & atomics_a_mask_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_77 = atomics_a_mask_size_4 & atomics_a_mask_eq_77; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_77 = atomics_a_mask_sub_6_1_4 | _atomics_a_mask_acc_T_77; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_78 = atomics_a_mask_sub_7_2_4 & atomics_a_mask_nbit_4; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_78 = atomics_a_mask_size_4 & atomics_a_mask_eq_78; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_78 = atomics_a_mask_sub_7_1_4 | _atomics_a_mask_acc_T_78; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_79 = atomics_a_mask_sub_7_2_4 & atomics_a_mask_bit_4; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_79 = atomics_a_mask_size_4 & atomics_a_mask_eq_79; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_79 = atomics_a_mask_sub_7_1_4 | _atomics_a_mask_acc_T_79; // @[Misc.scala:215:{29,38}] wire [1:0] atomics_a_mask_lo_lo_lo_4 = {atomics_a_mask_acc_65, atomics_a_mask_acc_64}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_lo_hi_4 = {atomics_a_mask_acc_67, atomics_a_mask_acc_66}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_lo_4 = {atomics_a_mask_lo_lo_hi_4, atomics_a_mask_lo_lo_lo_4}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_lo_hi_lo_4 = {atomics_a_mask_acc_69, atomics_a_mask_acc_68}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_hi_hi_4 = {atomics_a_mask_acc_71, atomics_a_mask_acc_70}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_hi_4 = {atomics_a_mask_lo_hi_hi_4, atomics_a_mask_lo_hi_lo_4}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_lo_4 = {atomics_a_mask_lo_hi_4, atomics_a_mask_lo_lo_4}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_lo_lo_4 = {atomics_a_mask_acc_73, atomics_a_mask_acc_72}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_lo_hi_4 = {atomics_a_mask_acc_75, atomics_a_mask_acc_74}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_lo_4 = {atomics_a_mask_hi_lo_hi_4, atomics_a_mask_hi_lo_lo_4}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_hi_lo_4 = {atomics_a_mask_acc_77, atomics_a_mask_acc_76}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_hi_hi_4 = {atomics_a_mask_acc_79, atomics_a_mask_acc_78}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_hi_4 = {atomics_a_mask_hi_hi_hi_4, atomics_a_mask_hi_hi_lo_4}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_hi_4 = {atomics_a_mask_hi_hi_4, atomics_a_mask_hi_lo_4}; // @[Misc.scala:222:10] assign _atomics_a_mask_T_4 = {atomics_a_mask_hi_4, atomics_a_mask_lo_4}; // @[Misc.scala:222:10] assign atomics_a_4_mask = _atomics_a_mask_T_4; // @[Misc.scala:222:10] wire [40:0] _atomics_legal_T_300 = {1'h0, _atomics_legal_T_299}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_301 = _atomics_legal_T_300 & 41'h9C110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_302 = _atomics_legal_T_301; // @[Parameters.scala:137:46] wire 
_atomics_legal_T_303 = _atomics_legal_T_302 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_305 = {1'h0, _atomics_legal_T_304}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_306 = _atomics_legal_T_305 & 41'h9E101000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_307 = _atomics_legal_T_306; // @[Parameters.scala:137:46] wire _atomics_legal_T_308 = _atomics_legal_T_307 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_310 = {1'h0, _atomics_legal_T_309}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_311 = _atomics_legal_T_310 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_312 = _atomics_legal_T_311; // @[Parameters.scala:137:46] wire _atomics_legal_T_313 = _atomics_legal_T_312 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_315 = {1'h0, _atomics_legal_T_314}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_316 = _atomics_legal_T_315 & 41'h9C000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_317 = _atomics_legal_T_316; // @[Parameters.scala:137:46] wire _atomics_legal_T_318 = _atomics_legal_T_317 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_320 = {1'h0, _atomics_legal_T_319}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_321 = _atomics_legal_T_320 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_322 = _atomics_legal_T_321; // @[Parameters.scala:137:46] wire _atomics_legal_T_323 = _atomics_legal_T_322 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_324 = _atomics_legal_T_303 | _atomics_legal_T_308; // @[Parameters.scala:685:42] wire _atomics_legal_T_325 = _atomics_legal_T_324 | _atomics_legal_T_313; // @[Parameters.scala:685:42] wire _atomics_legal_T_326 = _atomics_legal_T_325 | _atomics_legal_T_318; // @[Parameters.scala:685:42] wire _atomics_legal_T_327 = _atomics_legal_T_326 | _atomics_legal_T_323; // @[Parameters.scala:685:42] wire _atomics_legal_T_328 = _atomics_legal_T_327; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_352 = _atomics_legal_T_328; // @[Parameters.scala:684:54, :686:26] wire [40:0] _atomics_legal_T_331 = {1'h0, _atomics_legal_T_330}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_332 = _atomics_legal_T_331 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_333 = _atomics_legal_T_332; // @[Parameters.scala:137:46] wire _atomics_legal_T_334 = _atomics_legal_T_333 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_341 = {1'h0, _atomics_legal_T_340}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_342 = _atomics_legal_T_341 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_343 = _atomics_legal_T_342; // @[Parameters.scala:137:46] wire _atomics_legal_T_344 = _atomics_legal_T_343 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_346 = {1'h0, _atomics_legal_T_345}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_347 = _atomics_legal_T_346 & 41'h90000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_348 = _atomics_legal_T_347; // @[Parameters.scala:137:46] wire _atomics_legal_T_349 = _atomics_legal_T_348 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_350 = _atomics_legal_T_344 | _atomics_legal_T_349; // @[Parameters.scala:685:42] wire _atomics_legal_T_351 = _atomics_legal_T_350; // 
@[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_353 = _atomics_legal_T_352; // @[Parameters.scala:686:26] wire atomics_legal_5 = _atomics_legal_T_353 | _atomics_legal_T_351; // @[Parameters.scala:684:54, :686:26] wire [15:0] _atomics_a_mask_T_5; // @[Misc.scala:222:10] wire [15:0] atomics_a_5_mask; // @[Edges.scala:517:17] wire [1:0] atomics_a_mask_sizeOH_shiftAmount_5 = _atomics_a_mask_sizeOH_T_15[1:0]; // @[OneHot.scala:64:49] wire [3:0] _atomics_a_mask_sizeOH_T_16 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_5; // @[OneHot.scala:64:49, :65:12] wire [3:0] _atomics_a_mask_sizeOH_T_17 = _atomics_a_mask_sizeOH_T_16; // @[OneHot.scala:65:{12,27}] wire [3:0] atomics_a_mask_sizeOH_5 = {_atomics_a_mask_sizeOH_T_17[3:1], 1'h1}; // @[OneHot.scala:65:27] wire atomics_a_mask_sub_sub_sub_size_5 = atomics_a_mask_sizeOH_5[3]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_sub_1_2_5 = atomics_a_mask_sub_sub_sub_bit_5; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_sub_sub_nbit_5 = ~atomics_a_mask_sub_sub_sub_bit_5; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_sub_0_2_5 = atomics_a_mask_sub_sub_sub_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_sub_acc_T_10 = atomics_a_mask_sub_sub_sub_size_5 & atomics_a_mask_sub_sub_sub_0_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_0_1_5 = _atomics_a_mask_sub_sub_sub_acc_T_10; // @[Misc.scala:215:{29,38}] wire _atomics_a_mask_sub_sub_sub_acc_T_11 = atomics_a_mask_sub_sub_sub_size_5 & atomics_a_mask_sub_sub_sub_1_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_1_1_5 = _atomics_a_mask_sub_sub_sub_acc_T_11; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_size_5 = atomics_a_mask_sizeOH_5[2]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_nbit_5 = ~atomics_a_mask_sub_sub_bit_5; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_0_2_5 = atomics_a_mask_sub_sub_sub_0_2_5 & atomics_a_mask_sub_sub_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_20 = atomics_a_mask_sub_sub_size_5 & atomics_a_mask_sub_sub_0_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_0_1_5 = atomics_a_mask_sub_sub_sub_0_1_5 | _atomics_a_mask_sub_sub_acc_T_20; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_1_2_5 = atomics_a_mask_sub_sub_sub_0_2_5 & atomics_a_mask_sub_sub_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_21 = atomics_a_mask_sub_sub_size_5 & atomics_a_mask_sub_sub_1_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_1_1_5 = atomics_a_mask_sub_sub_sub_0_1_5 | _atomics_a_mask_sub_sub_acc_T_21; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_2_2_5 = atomics_a_mask_sub_sub_sub_1_2_5 & atomics_a_mask_sub_sub_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_22 = atomics_a_mask_sub_sub_size_5 & atomics_a_mask_sub_sub_2_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_2_1_5 = atomics_a_mask_sub_sub_sub_1_1_5 | _atomics_a_mask_sub_sub_acc_T_22; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_3_2_5 = atomics_a_mask_sub_sub_sub_1_2_5 & atomics_a_mask_sub_sub_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_23 = atomics_a_mask_sub_sub_size_5 & atomics_a_mask_sub_sub_3_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_3_1_5 = atomics_a_mask_sub_sub_sub_1_1_5 | _atomics_a_mask_sub_sub_acc_T_23; // 
@[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_size_5 = atomics_a_mask_sizeOH_5[1]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_nbit_5 = ~atomics_a_mask_sub_bit_5; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_0_2_5 = atomics_a_mask_sub_sub_0_2_5 & atomics_a_mask_sub_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_40 = atomics_a_mask_sub_size_5 & atomics_a_mask_sub_0_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_0_1_5 = atomics_a_mask_sub_sub_0_1_5 | _atomics_a_mask_sub_acc_T_40; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_1_2_5 = atomics_a_mask_sub_sub_0_2_5 & atomics_a_mask_sub_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_41 = atomics_a_mask_sub_size_5 & atomics_a_mask_sub_1_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_1_1_5 = atomics_a_mask_sub_sub_0_1_5 | _atomics_a_mask_sub_acc_T_41; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_2_2_5 = atomics_a_mask_sub_sub_1_2_5 & atomics_a_mask_sub_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_42 = atomics_a_mask_sub_size_5 & atomics_a_mask_sub_2_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_2_1_5 = atomics_a_mask_sub_sub_1_1_5 | _atomics_a_mask_sub_acc_T_42; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_3_2_5 = atomics_a_mask_sub_sub_1_2_5 & atomics_a_mask_sub_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_43 = atomics_a_mask_sub_size_5 & atomics_a_mask_sub_3_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_3_1_5 = atomics_a_mask_sub_sub_1_1_5 | _atomics_a_mask_sub_acc_T_43; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_4_2_5 = atomics_a_mask_sub_sub_2_2_5 & atomics_a_mask_sub_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_44 = atomics_a_mask_sub_size_5 & atomics_a_mask_sub_4_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_4_1_5 = atomics_a_mask_sub_sub_2_1_5 | _atomics_a_mask_sub_acc_T_44; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_5_2_5 = atomics_a_mask_sub_sub_2_2_5 & atomics_a_mask_sub_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_45 = atomics_a_mask_sub_size_5 & atomics_a_mask_sub_5_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_5_1_5 = atomics_a_mask_sub_sub_2_1_5 | _atomics_a_mask_sub_acc_T_45; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_6_2_5 = atomics_a_mask_sub_sub_3_2_5 & atomics_a_mask_sub_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_46 = atomics_a_mask_sub_size_5 & atomics_a_mask_sub_6_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_6_1_5 = atomics_a_mask_sub_sub_3_1_5 | _atomics_a_mask_sub_acc_T_46; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_7_2_5 = atomics_a_mask_sub_sub_3_2_5 & atomics_a_mask_sub_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_47 = atomics_a_mask_sub_size_5 & atomics_a_mask_sub_7_2_5; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_7_1_5 = atomics_a_mask_sub_sub_3_1_5 | _atomics_a_mask_sub_acc_T_47; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_size_5 = atomics_a_mask_sizeOH_5[0]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_nbit_5 = ~atomics_a_mask_bit_5; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_eq_80 = atomics_a_mask_sub_0_2_5 & atomics_a_mask_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_80 = 
atomics_a_mask_size_5 & atomics_a_mask_eq_80; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_80 = atomics_a_mask_sub_0_1_5 | _atomics_a_mask_acc_T_80; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_81 = atomics_a_mask_sub_0_2_5 & atomics_a_mask_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_81 = atomics_a_mask_size_5 & atomics_a_mask_eq_81; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_81 = atomics_a_mask_sub_0_1_5 | _atomics_a_mask_acc_T_81; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_82 = atomics_a_mask_sub_1_2_5 & atomics_a_mask_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_82 = atomics_a_mask_size_5 & atomics_a_mask_eq_82; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_82 = atomics_a_mask_sub_1_1_5 | _atomics_a_mask_acc_T_82; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_83 = atomics_a_mask_sub_1_2_5 & atomics_a_mask_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_83 = atomics_a_mask_size_5 & atomics_a_mask_eq_83; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_83 = atomics_a_mask_sub_1_1_5 | _atomics_a_mask_acc_T_83; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_84 = atomics_a_mask_sub_2_2_5 & atomics_a_mask_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_84 = atomics_a_mask_size_5 & atomics_a_mask_eq_84; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_84 = atomics_a_mask_sub_2_1_5 | _atomics_a_mask_acc_T_84; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_85 = atomics_a_mask_sub_2_2_5 & atomics_a_mask_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_85 = atomics_a_mask_size_5 & atomics_a_mask_eq_85; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_85 = atomics_a_mask_sub_2_1_5 | _atomics_a_mask_acc_T_85; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_86 = atomics_a_mask_sub_3_2_5 & atomics_a_mask_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_86 = atomics_a_mask_size_5 & atomics_a_mask_eq_86; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_86 = atomics_a_mask_sub_3_1_5 | _atomics_a_mask_acc_T_86; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_87 = atomics_a_mask_sub_3_2_5 & atomics_a_mask_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_87 = atomics_a_mask_size_5 & atomics_a_mask_eq_87; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_87 = atomics_a_mask_sub_3_1_5 | _atomics_a_mask_acc_T_87; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_88 = atomics_a_mask_sub_4_2_5 & atomics_a_mask_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_88 = atomics_a_mask_size_5 & atomics_a_mask_eq_88; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_88 = atomics_a_mask_sub_4_1_5 | _atomics_a_mask_acc_T_88; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_89 = atomics_a_mask_sub_4_2_5 & atomics_a_mask_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_89 = atomics_a_mask_size_5 & atomics_a_mask_eq_89; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_89 = atomics_a_mask_sub_4_1_5 | _atomics_a_mask_acc_T_89; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_90 = atomics_a_mask_sub_5_2_5 & atomics_a_mask_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_90 = atomics_a_mask_size_5 & atomics_a_mask_eq_90; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_90 = 
atomics_a_mask_sub_5_1_5 | _atomics_a_mask_acc_T_90; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_91 = atomics_a_mask_sub_5_2_5 & atomics_a_mask_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_91 = atomics_a_mask_size_5 & atomics_a_mask_eq_91; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_91 = atomics_a_mask_sub_5_1_5 | _atomics_a_mask_acc_T_91; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_92 = atomics_a_mask_sub_6_2_5 & atomics_a_mask_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_92 = atomics_a_mask_size_5 & atomics_a_mask_eq_92; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_92 = atomics_a_mask_sub_6_1_5 | _atomics_a_mask_acc_T_92; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_93 = atomics_a_mask_sub_6_2_5 & atomics_a_mask_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_93 = atomics_a_mask_size_5 & atomics_a_mask_eq_93; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_93 = atomics_a_mask_sub_6_1_5 | _atomics_a_mask_acc_T_93; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_94 = atomics_a_mask_sub_7_2_5 & atomics_a_mask_nbit_5; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_94 = atomics_a_mask_size_5 & atomics_a_mask_eq_94; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_94 = atomics_a_mask_sub_7_1_5 | _atomics_a_mask_acc_T_94; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_95 = atomics_a_mask_sub_7_2_5 & atomics_a_mask_bit_5; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_95 = atomics_a_mask_size_5 & atomics_a_mask_eq_95; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_95 = atomics_a_mask_sub_7_1_5 | _atomics_a_mask_acc_T_95; // @[Misc.scala:215:{29,38}] wire [1:0] atomics_a_mask_lo_lo_lo_5 = {atomics_a_mask_acc_81, atomics_a_mask_acc_80}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_lo_hi_5 = {atomics_a_mask_acc_83, atomics_a_mask_acc_82}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_lo_5 = {atomics_a_mask_lo_lo_hi_5, atomics_a_mask_lo_lo_lo_5}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_lo_hi_lo_5 = {atomics_a_mask_acc_85, atomics_a_mask_acc_84}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_hi_hi_5 = {atomics_a_mask_acc_87, atomics_a_mask_acc_86}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_hi_5 = {atomics_a_mask_lo_hi_hi_5, atomics_a_mask_lo_hi_lo_5}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_lo_5 = {atomics_a_mask_lo_hi_5, atomics_a_mask_lo_lo_5}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_lo_lo_5 = {atomics_a_mask_acc_89, atomics_a_mask_acc_88}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_lo_hi_5 = {atomics_a_mask_acc_91, atomics_a_mask_acc_90}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_lo_5 = {atomics_a_mask_hi_lo_hi_5, atomics_a_mask_hi_lo_lo_5}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_hi_lo_5 = {atomics_a_mask_acc_93, atomics_a_mask_acc_92}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_hi_hi_5 = {atomics_a_mask_acc_95, atomics_a_mask_acc_94}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_hi_5 = {atomics_a_mask_hi_hi_hi_5, atomics_a_mask_hi_hi_lo_5}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_hi_5 = {atomics_a_mask_hi_hi_5, atomics_a_mask_hi_lo_5}; // @[Misc.scala:222:10] assign _atomics_a_mask_T_5 = {atomics_a_mask_hi_5, atomics_a_mask_lo_5}; // @[Misc.scala:222:10] assign atomics_a_5_mask = 
_atomics_a_mask_T_5; // @[Misc.scala:222:10] wire [40:0] _atomics_legal_T_359 = {1'h0, _atomics_legal_T_358}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_360 = _atomics_legal_T_359 & 41'h9C110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_361 = _atomics_legal_T_360; // @[Parameters.scala:137:46] wire _atomics_legal_T_362 = _atomics_legal_T_361 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_364 = {1'h0, _atomics_legal_T_363}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_365 = _atomics_legal_T_364 & 41'h9E101000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_366 = _atomics_legal_T_365; // @[Parameters.scala:137:46] wire _atomics_legal_T_367 = _atomics_legal_T_366 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_369 = {1'h0, _atomics_legal_T_368}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_370 = _atomics_legal_T_369 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_371 = _atomics_legal_T_370; // @[Parameters.scala:137:46] wire _atomics_legal_T_372 = _atomics_legal_T_371 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_374 = {1'h0, _atomics_legal_T_373}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_375 = _atomics_legal_T_374 & 41'h9C000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_376 = _atomics_legal_T_375; // @[Parameters.scala:137:46] wire _atomics_legal_T_377 = _atomics_legal_T_376 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_379 = {1'h0, _atomics_legal_T_378}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_380 = _atomics_legal_T_379 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_381 = _atomics_legal_T_380; // @[Parameters.scala:137:46] wire _atomics_legal_T_382 = _atomics_legal_T_381 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_383 = _atomics_legal_T_362 | _atomics_legal_T_367; // @[Parameters.scala:685:42] wire _atomics_legal_T_384 = _atomics_legal_T_383 | _atomics_legal_T_372; // @[Parameters.scala:685:42] wire _atomics_legal_T_385 = _atomics_legal_T_384 | _atomics_legal_T_377; // @[Parameters.scala:685:42] wire _atomics_legal_T_386 = _atomics_legal_T_385 | _atomics_legal_T_382; // @[Parameters.scala:685:42] wire _atomics_legal_T_387 = _atomics_legal_T_386; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_411 = _atomics_legal_T_387; // @[Parameters.scala:684:54, :686:26] wire [40:0] _atomics_legal_T_390 = {1'h0, _atomics_legal_T_389}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_391 = _atomics_legal_T_390 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_392 = _atomics_legal_T_391; // @[Parameters.scala:137:46] wire _atomics_legal_T_393 = _atomics_legal_T_392 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_400 = {1'h0, _atomics_legal_T_399}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_401 = _atomics_legal_T_400 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_402 = _atomics_legal_T_401; // @[Parameters.scala:137:46] wire _atomics_legal_T_403 = _atomics_legal_T_402 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_405 = {1'h0, _atomics_legal_T_404}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_406 = _atomics_legal_T_405 & 41'h90000000; // 
@[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_407 = _atomics_legal_T_406; // @[Parameters.scala:137:46] wire _atomics_legal_T_408 = _atomics_legal_T_407 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_409 = _atomics_legal_T_403 | _atomics_legal_T_408; // @[Parameters.scala:685:42] wire _atomics_legal_T_410 = _atomics_legal_T_409; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_412 = _atomics_legal_T_411; // @[Parameters.scala:686:26] wire atomics_legal_6 = _atomics_legal_T_412 | _atomics_legal_T_410; // @[Parameters.scala:684:54, :686:26] wire [15:0] _atomics_a_mask_T_6; // @[Misc.scala:222:10] wire [15:0] atomics_a_6_mask; // @[Edges.scala:517:17] wire [1:0] atomics_a_mask_sizeOH_shiftAmount_6 = _atomics_a_mask_sizeOH_T_18[1:0]; // @[OneHot.scala:64:49] wire [3:0] _atomics_a_mask_sizeOH_T_19 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_6; // @[OneHot.scala:64:49, :65:12] wire [3:0] _atomics_a_mask_sizeOH_T_20 = _atomics_a_mask_sizeOH_T_19; // @[OneHot.scala:65:{12,27}] wire [3:0] atomics_a_mask_sizeOH_6 = {_atomics_a_mask_sizeOH_T_20[3:1], 1'h1}; // @[OneHot.scala:65:27] wire atomics_a_mask_sub_sub_sub_size_6 = atomics_a_mask_sizeOH_6[3]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_sub_1_2_6 = atomics_a_mask_sub_sub_sub_bit_6; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_sub_sub_nbit_6 = ~atomics_a_mask_sub_sub_sub_bit_6; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_sub_0_2_6 = atomics_a_mask_sub_sub_sub_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_sub_acc_T_12 = atomics_a_mask_sub_sub_sub_size_6 & atomics_a_mask_sub_sub_sub_0_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_0_1_6 = _atomics_a_mask_sub_sub_sub_acc_T_12; // @[Misc.scala:215:{29,38}] wire _atomics_a_mask_sub_sub_sub_acc_T_13 = atomics_a_mask_sub_sub_sub_size_6 & atomics_a_mask_sub_sub_sub_1_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_1_1_6 = _atomics_a_mask_sub_sub_sub_acc_T_13; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_size_6 = atomics_a_mask_sizeOH_6[2]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_nbit_6 = ~atomics_a_mask_sub_sub_bit_6; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_0_2_6 = atomics_a_mask_sub_sub_sub_0_2_6 & atomics_a_mask_sub_sub_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_24 = atomics_a_mask_sub_sub_size_6 & atomics_a_mask_sub_sub_0_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_0_1_6 = atomics_a_mask_sub_sub_sub_0_1_6 | _atomics_a_mask_sub_sub_acc_T_24; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_1_2_6 = atomics_a_mask_sub_sub_sub_0_2_6 & atomics_a_mask_sub_sub_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_25 = atomics_a_mask_sub_sub_size_6 & atomics_a_mask_sub_sub_1_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_1_1_6 = atomics_a_mask_sub_sub_sub_0_1_6 | _atomics_a_mask_sub_sub_acc_T_25; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_2_2_6 = atomics_a_mask_sub_sub_sub_1_2_6 & atomics_a_mask_sub_sub_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_26 = atomics_a_mask_sub_sub_size_6 & atomics_a_mask_sub_sub_2_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_2_1_6 = atomics_a_mask_sub_sub_sub_1_1_6 | _atomics_a_mask_sub_sub_acc_T_26; // @[Misc.scala:215:{29,38}] wire 
atomics_a_mask_sub_sub_3_2_6 = atomics_a_mask_sub_sub_sub_1_2_6 & atomics_a_mask_sub_sub_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_27 = atomics_a_mask_sub_sub_size_6 & atomics_a_mask_sub_sub_3_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_3_1_6 = atomics_a_mask_sub_sub_sub_1_1_6 | _atomics_a_mask_sub_sub_acc_T_27; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_size_6 = atomics_a_mask_sizeOH_6[1]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_nbit_6 = ~atomics_a_mask_sub_bit_6; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_0_2_6 = atomics_a_mask_sub_sub_0_2_6 & atomics_a_mask_sub_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_48 = atomics_a_mask_sub_size_6 & atomics_a_mask_sub_0_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_0_1_6 = atomics_a_mask_sub_sub_0_1_6 | _atomics_a_mask_sub_acc_T_48; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_1_2_6 = atomics_a_mask_sub_sub_0_2_6 & atomics_a_mask_sub_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_49 = atomics_a_mask_sub_size_6 & atomics_a_mask_sub_1_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_1_1_6 = atomics_a_mask_sub_sub_0_1_6 | _atomics_a_mask_sub_acc_T_49; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_2_2_6 = atomics_a_mask_sub_sub_1_2_6 & atomics_a_mask_sub_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_50 = atomics_a_mask_sub_size_6 & atomics_a_mask_sub_2_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_2_1_6 = atomics_a_mask_sub_sub_1_1_6 | _atomics_a_mask_sub_acc_T_50; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_3_2_6 = atomics_a_mask_sub_sub_1_2_6 & atomics_a_mask_sub_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_51 = atomics_a_mask_sub_size_6 & atomics_a_mask_sub_3_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_3_1_6 = atomics_a_mask_sub_sub_1_1_6 | _atomics_a_mask_sub_acc_T_51; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_4_2_6 = atomics_a_mask_sub_sub_2_2_6 & atomics_a_mask_sub_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_52 = atomics_a_mask_sub_size_6 & atomics_a_mask_sub_4_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_4_1_6 = atomics_a_mask_sub_sub_2_1_6 | _atomics_a_mask_sub_acc_T_52; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_5_2_6 = atomics_a_mask_sub_sub_2_2_6 & atomics_a_mask_sub_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_53 = atomics_a_mask_sub_size_6 & atomics_a_mask_sub_5_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_5_1_6 = atomics_a_mask_sub_sub_2_1_6 | _atomics_a_mask_sub_acc_T_53; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_6_2_6 = atomics_a_mask_sub_sub_3_2_6 & atomics_a_mask_sub_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_54 = atomics_a_mask_sub_size_6 & atomics_a_mask_sub_6_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_6_1_6 = atomics_a_mask_sub_sub_3_1_6 | _atomics_a_mask_sub_acc_T_54; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_7_2_6 = atomics_a_mask_sub_sub_3_2_6 & atomics_a_mask_sub_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_55 = atomics_a_mask_sub_size_6 & atomics_a_mask_sub_7_2_6; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_7_1_6 = atomics_a_mask_sub_sub_3_1_6 | 
_atomics_a_mask_sub_acc_T_55; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_size_6 = atomics_a_mask_sizeOH_6[0]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_nbit_6 = ~atomics_a_mask_bit_6; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_eq_96 = atomics_a_mask_sub_0_2_6 & atomics_a_mask_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_96 = atomics_a_mask_size_6 & atomics_a_mask_eq_96; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_96 = atomics_a_mask_sub_0_1_6 | _atomics_a_mask_acc_T_96; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_97 = atomics_a_mask_sub_0_2_6 & atomics_a_mask_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_97 = atomics_a_mask_size_6 & atomics_a_mask_eq_97; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_97 = atomics_a_mask_sub_0_1_6 | _atomics_a_mask_acc_T_97; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_98 = atomics_a_mask_sub_1_2_6 & atomics_a_mask_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_98 = atomics_a_mask_size_6 & atomics_a_mask_eq_98; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_98 = atomics_a_mask_sub_1_1_6 | _atomics_a_mask_acc_T_98; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_99 = atomics_a_mask_sub_1_2_6 & atomics_a_mask_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_99 = atomics_a_mask_size_6 & atomics_a_mask_eq_99; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_99 = atomics_a_mask_sub_1_1_6 | _atomics_a_mask_acc_T_99; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_100 = atomics_a_mask_sub_2_2_6 & atomics_a_mask_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_100 = atomics_a_mask_size_6 & atomics_a_mask_eq_100; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_100 = atomics_a_mask_sub_2_1_6 | _atomics_a_mask_acc_T_100; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_101 = atomics_a_mask_sub_2_2_6 & atomics_a_mask_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_101 = atomics_a_mask_size_6 & atomics_a_mask_eq_101; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_101 = atomics_a_mask_sub_2_1_6 | _atomics_a_mask_acc_T_101; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_102 = atomics_a_mask_sub_3_2_6 & atomics_a_mask_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_102 = atomics_a_mask_size_6 & atomics_a_mask_eq_102; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_102 = atomics_a_mask_sub_3_1_6 | _atomics_a_mask_acc_T_102; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_103 = atomics_a_mask_sub_3_2_6 & atomics_a_mask_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_103 = atomics_a_mask_size_6 & atomics_a_mask_eq_103; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_103 = atomics_a_mask_sub_3_1_6 | _atomics_a_mask_acc_T_103; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_104 = atomics_a_mask_sub_4_2_6 & atomics_a_mask_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_104 = atomics_a_mask_size_6 & atomics_a_mask_eq_104; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_104 = atomics_a_mask_sub_4_1_6 | _atomics_a_mask_acc_T_104; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_105 = atomics_a_mask_sub_4_2_6 & atomics_a_mask_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_105 = atomics_a_mask_size_6 & atomics_a_mask_eq_105; // @[Misc.scala:209:26, 
:214:27, :215:38] wire atomics_a_mask_acc_105 = atomics_a_mask_sub_4_1_6 | _atomics_a_mask_acc_T_105; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_106 = atomics_a_mask_sub_5_2_6 & atomics_a_mask_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_106 = atomics_a_mask_size_6 & atomics_a_mask_eq_106; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_106 = atomics_a_mask_sub_5_1_6 | _atomics_a_mask_acc_T_106; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_107 = atomics_a_mask_sub_5_2_6 & atomics_a_mask_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_107 = atomics_a_mask_size_6 & atomics_a_mask_eq_107; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_107 = atomics_a_mask_sub_5_1_6 | _atomics_a_mask_acc_T_107; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_108 = atomics_a_mask_sub_6_2_6 & atomics_a_mask_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_108 = atomics_a_mask_size_6 & atomics_a_mask_eq_108; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_108 = atomics_a_mask_sub_6_1_6 | _atomics_a_mask_acc_T_108; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_109 = atomics_a_mask_sub_6_2_6 & atomics_a_mask_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_109 = atomics_a_mask_size_6 & atomics_a_mask_eq_109; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_109 = atomics_a_mask_sub_6_1_6 | _atomics_a_mask_acc_T_109; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_110 = atomics_a_mask_sub_7_2_6 & atomics_a_mask_nbit_6; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_110 = atomics_a_mask_size_6 & atomics_a_mask_eq_110; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_110 = atomics_a_mask_sub_7_1_6 | _atomics_a_mask_acc_T_110; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_111 = atomics_a_mask_sub_7_2_6 & atomics_a_mask_bit_6; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_111 = atomics_a_mask_size_6 & atomics_a_mask_eq_111; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_111 = atomics_a_mask_sub_7_1_6 | _atomics_a_mask_acc_T_111; // @[Misc.scala:215:{29,38}] wire [1:0] atomics_a_mask_lo_lo_lo_6 = {atomics_a_mask_acc_97, atomics_a_mask_acc_96}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_lo_hi_6 = {atomics_a_mask_acc_99, atomics_a_mask_acc_98}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_lo_6 = {atomics_a_mask_lo_lo_hi_6, atomics_a_mask_lo_lo_lo_6}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_lo_hi_lo_6 = {atomics_a_mask_acc_101, atomics_a_mask_acc_100}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_hi_hi_6 = {atomics_a_mask_acc_103, atomics_a_mask_acc_102}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_hi_6 = {atomics_a_mask_lo_hi_hi_6, atomics_a_mask_lo_hi_lo_6}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_lo_6 = {atomics_a_mask_lo_hi_6, atomics_a_mask_lo_lo_6}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_lo_lo_6 = {atomics_a_mask_acc_105, atomics_a_mask_acc_104}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_lo_hi_6 = {atomics_a_mask_acc_107, atomics_a_mask_acc_106}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_lo_6 = {atomics_a_mask_hi_lo_hi_6, atomics_a_mask_hi_lo_lo_6}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_hi_lo_6 = {atomics_a_mask_acc_109, atomics_a_mask_acc_108}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_hi_hi_6 = 
{atomics_a_mask_acc_111, atomics_a_mask_acc_110}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_hi_6 = {atomics_a_mask_hi_hi_hi_6, atomics_a_mask_hi_hi_lo_6}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_hi_6 = {atomics_a_mask_hi_hi_6, atomics_a_mask_hi_lo_6}; // @[Misc.scala:222:10] assign _atomics_a_mask_T_6 = {atomics_a_mask_hi_6, atomics_a_mask_lo_6}; // @[Misc.scala:222:10] assign atomics_a_6_mask = _atomics_a_mask_T_6; // @[Misc.scala:222:10] wire [40:0] _atomics_legal_T_418 = {1'h0, _atomics_legal_T_417}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_419 = _atomics_legal_T_418 & 41'h9C110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_420 = _atomics_legal_T_419; // @[Parameters.scala:137:46] wire _atomics_legal_T_421 = _atomics_legal_T_420 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_423 = {1'h0, _atomics_legal_T_422}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_424 = _atomics_legal_T_423 & 41'h9E101000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_425 = _atomics_legal_T_424; // @[Parameters.scala:137:46] wire _atomics_legal_T_426 = _atomics_legal_T_425 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_428 = {1'h0, _atomics_legal_T_427}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_429 = _atomics_legal_T_428 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_430 = _atomics_legal_T_429; // @[Parameters.scala:137:46] wire _atomics_legal_T_431 = _atomics_legal_T_430 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_433 = {1'h0, _atomics_legal_T_432}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_434 = _atomics_legal_T_433 & 41'h9C000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_435 = _atomics_legal_T_434; // @[Parameters.scala:137:46] wire _atomics_legal_T_436 = _atomics_legal_T_435 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_438 = {1'h0, _atomics_legal_T_437}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_439 = _atomics_legal_T_438 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_440 = _atomics_legal_T_439; // @[Parameters.scala:137:46] wire _atomics_legal_T_441 = _atomics_legal_T_440 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_442 = _atomics_legal_T_421 | _atomics_legal_T_426; // @[Parameters.scala:685:42] wire _atomics_legal_T_443 = _atomics_legal_T_442 | _atomics_legal_T_431; // @[Parameters.scala:685:42] wire _atomics_legal_T_444 = _atomics_legal_T_443 | _atomics_legal_T_436; // @[Parameters.scala:685:42] wire _atomics_legal_T_445 = _atomics_legal_T_444 | _atomics_legal_T_441; // @[Parameters.scala:685:42] wire _atomics_legal_T_446 = _atomics_legal_T_445; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_470 = _atomics_legal_T_446; // @[Parameters.scala:684:54, :686:26] wire [40:0] _atomics_legal_T_449 = {1'h0, _atomics_legal_T_448}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_450 = _atomics_legal_T_449 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_451 = _atomics_legal_T_450; // @[Parameters.scala:137:46] wire _atomics_legal_T_452 = _atomics_legal_T_451 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_459 = {1'h0, _atomics_legal_T_458}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_460 = _atomics_legal_T_459 & 
41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_461 = _atomics_legal_T_460; // @[Parameters.scala:137:46] wire _atomics_legal_T_462 = _atomics_legal_T_461 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_464 = {1'h0, _atomics_legal_T_463}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_465 = _atomics_legal_T_464 & 41'h90000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_466 = _atomics_legal_T_465; // @[Parameters.scala:137:46] wire _atomics_legal_T_467 = _atomics_legal_T_466 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_468 = _atomics_legal_T_462 | _atomics_legal_T_467; // @[Parameters.scala:685:42] wire _atomics_legal_T_469 = _atomics_legal_T_468; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_471 = _atomics_legal_T_470; // @[Parameters.scala:686:26] wire atomics_legal_7 = _atomics_legal_T_471 | _atomics_legal_T_469; // @[Parameters.scala:684:54, :686:26] wire [15:0] _atomics_a_mask_T_7; // @[Misc.scala:222:10] wire [15:0] atomics_a_7_mask; // @[Edges.scala:517:17] wire [1:0] atomics_a_mask_sizeOH_shiftAmount_7 = _atomics_a_mask_sizeOH_T_21[1:0]; // @[OneHot.scala:64:49] wire [3:0] _atomics_a_mask_sizeOH_T_22 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_7; // @[OneHot.scala:64:49, :65:12] wire [3:0] _atomics_a_mask_sizeOH_T_23 = _atomics_a_mask_sizeOH_T_22; // @[OneHot.scala:65:{12,27}] wire [3:0] atomics_a_mask_sizeOH_7 = {_atomics_a_mask_sizeOH_T_23[3:1], 1'h1}; // @[OneHot.scala:65:27] wire atomics_a_mask_sub_sub_sub_size_7 = atomics_a_mask_sizeOH_7[3]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_sub_1_2_7 = atomics_a_mask_sub_sub_sub_bit_7; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_sub_sub_nbit_7 = ~atomics_a_mask_sub_sub_sub_bit_7; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_sub_0_2_7 = atomics_a_mask_sub_sub_sub_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_sub_acc_T_14 = atomics_a_mask_sub_sub_sub_size_7 & atomics_a_mask_sub_sub_sub_0_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_0_1_7 = _atomics_a_mask_sub_sub_sub_acc_T_14; // @[Misc.scala:215:{29,38}] wire _atomics_a_mask_sub_sub_sub_acc_T_15 = atomics_a_mask_sub_sub_sub_size_7 & atomics_a_mask_sub_sub_sub_1_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_1_1_7 = _atomics_a_mask_sub_sub_sub_acc_T_15; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_size_7 = atomics_a_mask_sizeOH_7[2]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_nbit_7 = ~atomics_a_mask_sub_sub_bit_7; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_0_2_7 = atomics_a_mask_sub_sub_sub_0_2_7 & atomics_a_mask_sub_sub_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_28 = atomics_a_mask_sub_sub_size_7 & atomics_a_mask_sub_sub_0_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_0_1_7 = atomics_a_mask_sub_sub_sub_0_1_7 | _atomics_a_mask_sub_sub_acc_T_28; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_1_2_7 = atomics_a_mask_sub_sub_sub_0_2_7 & atomics_a_mask_sub_sub_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_29 = atomics_a_mask_sub_sub_size_7 & atomics_a_mask_sub_sub_1_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_1_1_7 = atomics_a_mask_sub_sub_sub_0_1_7 | _atomics_a_mask_sub_sub_acc_T_29; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_2_2_7 = 
atomics_a_mask_sub_sub_sub_1_2_7 & atomics_a_mask_sub_sub_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_30 = atomics_a_mask_sub_sub_size_7 & atomics_a_mask_sub_sub_2_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_2_1_7 = atomics_a_mask_sub_sub_sub_1_1_7 | _atomics_a_mask_sub_sub_acc_T_30; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_3_2_7 = atomics_a_mask_sub_sub_sub_1_2_7 & atomics_a_mask_sub_sub_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_31 = atomics_a_mask_sub_sub_size_7 & atomics_a_mask_sub_sub_3_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_3_1_7 = atomics_a_mask_sub_sub_sub_1_1_7 | _atomics_a_mask_sub_sub_acc_T_31; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_size_7 = atomics_a_mask_sizeOH_7[1]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_nbit_7 = ~atomics_a_mask_sub_bit_7; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_0_2_7 = atomics_a_mask_sub_sub_0_2_7 & atomics_a_mask_sub_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_56 = atomics_a_mask_sub_size_7 & atomics_a_mask_sub_0_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_0_1_7 = atomics_a_mask_sub_sub_0_1_7 | _atomics_a_mask_sub_acc_T_56; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_1_2_7 = atomics_a_mask_sub_sub_0_2_7 & atomics_a_mask_sub_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_57 = atomics_a_mask_sub_size_7 & atomics_a_mask_sub_1_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_1_1_7 = atomics_a_mask_sub_sub_0_1_7 | _atomics_a_mask_sub_acc_T_57; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_2_2_7 = atomics_a_mask_sub_sub_1_2_7 & atomics_a_mask_sub_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_58 = atomics_a_mask_sub_size_7 & atomics_a_mask_sub_2_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_2_1_7 = atomics_a_mask_sub_sub_1_1_7 | _atomics_a_mask_sub_acc_T_58; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_3_2_7 = atomics_a_mask_sub_sub_1_2_7 & atomics_a_mask_sub_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_59 = atomics_a_mask_sub_size_7 & atomics_a_mask_sub_3_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_3_1_7 = atomics_a_mask_sub_sub_1_1_7 | _atomics_a_mask_sub_acc_T_59; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_4_2_7 = atomics_a_mask_sub_sub_2_2_7 & atomics_a_mask_sub_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_60 = atomics_a_mask_sub_size_7 & atomics_a_mask_sub_4_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_4_1_7 = atomics_a_mask_sub_sub_2_1_7 | _atomics_a_mask_sub_acc_T_60; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_5_2_7 = atomics_a_mask_sub_sub_2_2_7 & atomics_a_mask_sub_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_61 = atomics_a_mask_sub_size_7 & atomics_a_mask_sub_5_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_5_1_7 = atomics_a_mask_sub_sub_2_1_7 | _atomics_a_mask_sub_acc_T_61; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_6_2_7 = atomics_a_mask_sub_sub_3_2_7 & atomics_a_mask_sub_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_62 = atomics_a_mask_sub_size_7 & atomics_a_mask_sub_6_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_6_1_7 = atomics_a_mask_sub_sub_3_1_7 | 
_atomics_a_mask_sub_acc_T_62; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_7_2_7 = atomics_a_mask_sub_sub_3_2_7 & atomics_a_mask_sub_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_63 = atomics_a_mask_sub_size_7 & atomics_a_mask_sub_7_2_7; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_7_1_7 = atomics_a_mask_sub_sub_3_1_7 | _atomics_a_mask_sub_acc_T_63; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_size_7 = atomics_a_mask_sizeOH_7[0]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_nbit_7 = ~atomics_a_mask_bit_7; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_eq_112 = atomics_a_mask_sub_0_2_7 & atomics_a_mask_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_112 = atomics_a_mask_size_7 & atomics_a_mask_eq_112; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_112 = atomics_a_mask_sub_0_1_7 | _atomics_a_mask_acc_T_112; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_113 = atomics_a_mask_sub_0_2_7 & atomics_a_mask_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_113 = atomics_a_mask_size_7 & atomics_a_mask_eq_113; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_113 = atomics_a_mask_sub_0_1_7 | _atomics_a_mask_acc_T_113; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_114 = atomics_a_mask_sub_1_2_7 & atomics_a_mask_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_114 = atomics_a_mask_size_7 & atomics_a_mask_eq_114; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_114 = atomics_a_mask_sub_1_1_7 | _atomics_a_mask_acc_T_114; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_115 = atomics_a_mask_sub_1_2_7 & atomics_a_mask_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_115 = atomics_a_mask_size_7 & atomics_a_mask_eq_115; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_115 = atomics_a_mask_sub_1_1_7 | _atomics_a_mask_acc_T_115; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_116 = atomics_a_mask_sub_2_2_7 & atomics_a_mask_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_116 = atomics_a_mask_size_7 & atomics_a_mask_eq_116; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_116 = atomics_a_mask_sub_2_1_7 | _atomics_a_mask_acc_T_116; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_117 = atomics_a_mask_sub_2_2_7 & atomics_a_mask_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_117 = atomics_a_mask_size_7 & atomics_a_mask_eq_117; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_117 = atomics_a_mask_sub_2_1_7 | _atomics_a_mask_acc_T_117; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_118 = atomics_a_mask_sub_3_2_7 & atomics_a_mask_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_118 = atomics_a_mask_size_7 & atomics_a_mask_eq_118; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_118 = atomics_a_mask_sub_3_1_7 | _atomics_a_mask_acc_T_118; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_119 = atomics_a_mask_sub_3_2_7 & atomics_a_mask_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_119 = atomics_a_mask_size_7 & atomics_a_mask_eq_119; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_119 = atomics_a_mask_sub_3_1_7 | _atomics_a_mask_acc_T_119; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_120 = atomics_a_mask_sub_4_2_7 & atomics_a_mask_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_120 = atomics_a_mask_size_7 & 
atomics_a_mask_eq_120; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_120 = atomics_a_mask_sub_4_1_7 | _atomics_a_mask_acc_T_120; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_121 = atomics_a_mask_sub_4_2_7 & atomics_a_mask_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_121 = atomics_a_mask_size_7 & atomics_a_mask_eq_121; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_121 = atomics_a_mask_sub_4_1_7 | _atomics_a_mask_acc_T_121; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_122 = atomics_a_mask_sub_5_2_7 & atomics_a_mask_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_122 = atomics_a_mask_size_7 & atomics_a_mask_eq_122; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_122 = atomics_a_mask_sub_5_1_7 | _atomics_a_mask_acc_T_122; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_123 = atomics_a_mask_sub_5_2_7 & atomics_a_mask_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_123 = atomics_a_mask_size_7 & atomics_a_mask_eq_123; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_123 = atomics_a_mask_sub_5_1_7 | _atomics_a_mask_acc_T_123; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_124 = atomics_a_mask_sub_6_2_7 & atomics_a_mask_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_124 = atomics_a_mask_size_7 & atomics_a_mask_eq_124; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_124 = atomics_a_mask_sub_6_1_7 | _atomics_a_mask_acc_T_124; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_125 = atomics_a_mask_sub_6_2_7 & atomics_a_mask_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_125 = atomics_a_mask_size_7 & atomics_a_mask_eq_125; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_125 = atomics_a_mask_sub_6_1_7 | _atomics_a_mask_acc_T_125; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_126 = atomics_a_mask_sub_7_2_7 & atomics_a_mask_nbit_7; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_126 = atomics_a_mask_size_7 & atomics_a_mask_eq_126; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_126 = atomics_a_mask_sub_7_1_7 | _atomics_a_mask_acc_T_126; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_127 = atomics_a_mask_sub_7_2_7 & atomics_a_mask_bit_7; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_127 = atomics_a_mask_size_7 & atomics_a_mask_eq_127; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_127 = atomics_a_mask_sub_7_1_7 | _atomics_a_mask_acc_T_127; // @[Misc.scala:215:{29,38}] wire [1:0] atomics_a_mask_lo_lo_lo_7 = {atomics_a_mask_acc_113, atomics_a_mask_acc_112}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_lo_hi_7 = {atomics_a_mask_acc_115, atomics_a_mask_acc_114}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_lo_7 = {atomics_a_mask_lo_lo_hi_7, atomics_a_mask_lo_lo_lo_7}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_lo_hi_lo_7 = {atomics_a_mask_acc_117, atomics_a_mask_acc_116}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_hi_hi_7 = {atomics_a_mask_acc_119, atomics_a_mask_acc_118}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_hi_7 = {atomics_a_mask_lo_hi_hi_7, atomics_a_mask_lo_hi_lo_7}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_lo_7 = {atomics_a_mask_lo_hi_7, atomics_a_mask_lo_lo_7}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_lo_lo_7 = {atomics_a_mask_acc_121, atomics_a_mask_acc_120}; // @[Misc.scala:215:29, :222:10] 
wire [1:0] atomics_a_mask_hi_lo_hi_7 = {atomics_a_mask_acc_123, atomics_a_mask_acc_122}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_lo_7 = {atomics_a_mask_hi_lo_hi_7, atomics_a_mask_hi_lo_lo_7}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_hi_lo_7 = {atomics_a_mask_acc_125, atomics_a_mask_acc_124}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_hi_hi_7 = {atomics_a_mask_acc_127, atomics_a_mask_acc_126}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_hi_7 = {atomics_a_mask_hi_hi_hi_7, atomics_a_mask_hi_hi_lo_7}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_hi_7 = {atomics_a_mask_hi_hi_7, atomics_a_mask_hi_lo_7}; // @[Misc.scala:222:10] assign _atomics_a_mask_T_7 = {atomics_a_mask_hi_7, atomics_a_mask_lo_7}; // @[Misc.scala:222:10] assign atomics_a_7_mask = _atomics_a_mask_T_7; // @[Misc.scala:222:10] wire [40:0] _atomics_legal_T_477 = {1'h0, _atomics_legal_T_476}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_478 = _atomics_legal_T_477 & 41'h9C110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_479 = _atomics_legal_T_478; // @[Parameters.scala:137:46] wire _atomics_legal_T_480 = _atomics_legal_T_479 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_482 = {1'h0, _atomics_legal_T_481}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_483 = _atomics_legal_T_482 & 41'h9E101000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_484 = _atomics_legal_T_483; // @[Parameters.scala:137:46] wire _atomics_legal_T_485 = _atomics_legal_T_484 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_487 = {1'h0, _atomics_legal_T_486}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_488 = _atomics_legal_T_487 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_489 = _atomics_legal_T_488; // @[Parameters.scala:137:46] wire _atomics_legal_T_490 = _atomics_legal_T_489 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_492 = {1'h0, _atomics_legal_T_491}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_493 = _atomics_legal_T_492 & 41'h9C000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_494 = _atomics_legal_T_493; // @[Parameters.scala:137:46] wire _atomics_legal_T_495 = _atomics_legal_T_494 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_497 = {1'h0, _atomics_legal_T_496}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_498 = _atomics_legal_T_497 & 41'h9E111000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_499 = _atomics_legal_T_498; // @[Parameters.scala:137:46] wire _atomics_legal_T_500 = _atomics_legal_T_499 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_501 = _atomics_legal_T_480 | _atomics_legal_T_485; // @[Parameters.scala:685:42] wire _atomics_legal_T_502 = _atomics_legal_T_501 | _atomics_legal_T_490; // @[Parameters.scala:685:42] wire _atomics_legal_T_503 = _atomics_legal_T_502 | _atomics_legal_T_495; // @[Parameters.scala:685:42] wire _atomics_legal_T_504 = _atomics_legal_T_503 | _atomics_legal_T_500; // @[Parameters.scala:685:42] wire _atomics_legal_T_505 = _atomics_legal_T_504; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_529 = _atomics_legal_T_505; // @[Parameters.scala:684:54, :686:26] wire [40:0] _atomics_legal_T_508 = {1'h0, _atomics_legal_T_507}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_509 = 
_atomics_legal_T_508 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_510 = _atomics_legal_T_509; // @[Parameters.scala:137:46] wire _atomics_legal_T_511 = _atomics_legal_T_510 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_518 = {1'h0, _atomics_legal_T_517}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_519 = _atomics_legal_T_518 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_520 = _atomics_legal_T_519; // @[Parameters.scala:137:46] wire _atomics_legal_T_521 = _atomics_legal_T_520 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _atomics_legal_T_523 = {1'h0, _atomics_legal_T_522}; // @[Parameters.scala:137:{31,41}] wire [40:0] _atomics_legal_T_524 = _atomics_legal_T_523 & 41'h90000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _atomics_legal_T_525 = _atomics_legal_T_524; // @[Parameters.scala:137:46] wire _atomics_legal_T_526 = _atomics_legal_T_525 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _atomics_legal_T_527 = _atomics_legal_T_521 | _atomics_legal_T_526; // @[Parameters.scala:685:42] wire _atomics_legal_T_528 = _atomics_legal_T_527; // @[Parameters.scala:684:54, :685:42] wire _atomics_legal_T_530 = _atomics_legal_T_529; // @[Parameters.scala:686:26] wire atomics_legal_8 = _atomics_legal_T_530 | _atomics_legal_T_528; // @[Parameters.scala:684:54, :686:26] wire [15:0] _atomics_a_mask_T_8; // @[Misc.scala:222:10] wire [15:0] atomics_a_8_mask; // @[Edges.scala:517:17] wire [1:0] atomics_a_mask_sizeOH_shiftAmount_8 = _atomics_a_mask_sizeOH_T_24[1:0]; // @[OneHot.scala:64:49] wire [3:0] _atomics_a_mask_sizeOH_T_25 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_8; // @[OneHot.scala:64:49, :65:12] wire [3:0] _atomics_a_mask_sizeOH_T_26 = _atomics_a_mask_sizeOH_T_25; // @[OneHot.scala:65:{12,27}] wire [3:0] atomics_a_mask_sizeOH_8 = {_atomics_a_mask_sizeOH_T_26[3:1], 1'h1}; // @[OneHot.scala:65:27] wire atomics_a_mask_sub_sub_sub_size_8 = atomics_a_mask_sizeOH_8[3]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_sub_1_2_8 = atomics_a_mask_sub_sub_sub_bit_8; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_sub_sub_nbit_8 = ~atomics_a_mask_sub_sub_sub_bit_8; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_sub_0_2_8 = atomics_a_mask_sub_sub_sub_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_sub_acc_T_16 = atomics_a_mask_sub_sub_sub_size_8 & atomics_a_mask_sub_sub_sub_0_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_0_1_8 = _atomics_a_mask_sub_sub_sub_acc_T_16; // @[Misc.scala:215:{29,38}] wire _atomics_a_mask_sub_sub_sub_acc_T_17 = atomics_a_mask_sub_sub_sub_size_8 & atomics_a_mask_sub_sub_sub_1_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_sub_1_1_8 = _atomics_a_mask_sub_sub_sub_acc_T_17; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_size_8 = atomics_a_mask_sizeOH_8[2]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_sub_nbit_8 = ~atomics_a_mask_sub_sub_bit_8; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_sub_0_2_8 = atomics_a_mask_sub_sub_sub_0_2_8 & atomics_a_mask_sub_sub_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_32 = atomics_a_mask_sub_sub_size_8 & atomics_a_mask_sub_sub_0_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_0_1_8 = atomics_a_mask_sub_sub_sub_0_1_8 | _atomics_a_mask_sub_sub_acc_T_32; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_1_2_8 = 
atomics_a_mask_sub_sub_sub_0_2_8 & atomics_a_mask_sub_sub_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_33 = atomics_a_mask_sub_sub_size_8 & atomics_a_mask_sub_sub_1_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_1_1_8 = atomics_a_mask_sub_sub_sub_0_1_8 | _atomics_a_mask_sub_sub_acc_T_33; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_2_2_8 = atomics_a_mask_sub_sub_sub_1_2_8 & atomics_a_mask_sub_sub_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_sub_acc_T_34 = atomics_a_mask_sub_sub_size_8 & atomics_a_mask_sub_sub_2_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_2_1_8 = atomics_a_mask_sub_sub_sub_1_1_8 | _atomics_a_mask_sub_sub_acc_T_34; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_sub_3_2_8 = atomics_a_mask_sub_sub_sub_1_2_8 & atomics_a_mask_sub_sub_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_sub_acc_T_35 = atomics_a_mask_sub_sub_size_8 & atomics_a_mask_sub_sub_3_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_sub_3_1_8 = atomics_a_mask_sub_sub_sub_1_1_8 | _atomics_a_mask_sub_sub_acc_T_35; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_size_8 = atomics_a_mask_sizeOH_8[1]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_sub_nbit_8 = ~atomics_a_mask_sub_bit_8; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_sub_0_2_8 = atomics_a_mask_sub_sub_0_2_8 & atomics_a_mask_sub_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_64 = atomics_a_mask_sub_size_8 & atomics_a_mask_sub_0_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_0_1_8 = atomics_a_mask_sub_sub_0_1_8 | _atomics_a_mask_sub_acc_T_64; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_1_2_8 = atomics_a_mask_sub_sub_0_2_8 & atomics_a_mask_sub_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_65 = atomics_a_mask_sub_size_8 & atomics_a_mask_sub_1_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_1_1_8 = atomics_a_mask_sub_sub_0_1_8 | _atomics_a_mask_sub_acc_T_65; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_2_2_8 = atomics_a_mask_sub_sub_1_2_8 & atomics_a_mask_sub_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_66 = atomics_a_mask_sub_size_8 & atomics_a_mask_sub_2_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_2_1_8 = atomics_a_mask_sub_sub_1_1_8 | _atomics_a_mask_sub_acc_T_66; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_3_2_8 = atomics_a_mask_sub_sub_1_2_8 & atomics_a_mask_sub_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_67 = atomics_a_mask_sub_size_8 & atomics_a_mask_sub_3_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_3_1_8 = atomics_a_mask_sub_sub_1_1_8 | _atomics_a_mask_sub_acc_T_67; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_4_2_8 = atomics_a_mask_sub_sub_2_2_8 & atomics_a_mask_sub_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_68 = atomics_a_mask_sub_size_8 & atomics_a_mask_sub_4_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_4_1_8 = atomics_a_mask_sub_sub_2_1_8 | _atomics_a_mask_sub_acc_T_68; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_5_2_8 = atomics_a_mask_sub_sub_2_2_8 & atomics_a_mask_sub_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_69 = atomics_a_mask_sub_size_8 & atomics_a_mask_sub_5_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_5_1_8 
= atomics_a_mask_sub_sub_2_1_8 | _atomics_a_mask_sub_acc_T_69; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_6_2_8 = atomics_a_mask_sub_sub_3_2_8 & atomics_a_mask_sub_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_sub_acc_T_70 = atomics_a_mask_sub_size_8 & atomics_a_mask_sub_6_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_6_1_8 = atomics_a_mask_sub_sub_3_1_8 | _atomics_a_mask_sub_acc_T_70; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_sub_7_2_8 = atomics_a_mask_sub_sub_3_2_8 & atomics_a_mask_sub_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_sub_acc_T_71 = atomics_a_mask_sub_size_8 & atomics_a_mask_sub_7_2_8; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_sub_7_1_8 = atomics_a_mask_sub_sub_3_1_8 | _atomics_a_mask_sub_acc_T_71; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_size_8 = atomics_a_mask_sizeOH_8[0]; // @[Misc.scala:202:81, :209:26] wire atomics_a_mask_nbit_8 = ~atomics_a_mask_bit_8; // @[Misc.scala:210:26, :211:20] wire atomics_a_mask_eq_128 = atomics_a_mask_sub_0_2_8 & atomics_a_mask_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_128 = atomics_a_mask_size_8 & atomics_a_mask_eq_128; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_128 = atomics_a_mask_sub_0_1_8 | _atomics_a_mask_acc_T_128; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_129 = atomics_a_mask_sub_0_2_8 & atomics_a_mask_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_129 = atomics_a_mask_size_8 & atomics_a_mask_eq_129; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_129 = atomics_a_mask_sub_0_1_8 | _atomics_a_mask_acc_T_129; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_130 = atomics_a_mask_sub_1_2_8 & atomics_a_mask_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_130 = atomics_a_mask_size_8 & atomics_a_mask_eq_130; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_130 = atomics_a_mask_sub_1_1_8 | _atomics_a_mask_acc_T_130; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_131 = atomics_a_mask_sub_1_2_8 & atomics_a_mask_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_131 = atomics_a_mask_size_8 & atomics_a_mask_eq_131; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_131 = atomics_a_mask_sub_1_1_8 | _atomics_a_mask_acc_T_131; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_132 = atomics_a_mask_sub_2_2_8 & atomics_a_mask_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_132 = atomics_a_mask_size_8 & atomics_a_mask_eq_132; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_132 = atomics_a_mask_sub_2_1_8 | _atomics_a_mask_acc_T_132; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_133 = atomics_a_mask_sub_2_2_8 & atomics_a_mask_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_133 = atomics_a_mask_size_8 & atomics_a_mask_eq_133; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_133 = atomics_a_mask_sub_2_1_8 | _atomics_a_mask_acc_T_133; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_134 = atomics_a_mask_sub_3_2_8 & atomics_a_mask_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_134 = atomics_a_mask_size_8 & atomics_a_mask_eq_134; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_134 = atomics_a_mask_sub_3_1_8 | _atomics_a_mask_acc_T_134; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_135 = atomics_a_mask_sub_3_2_8 & atomics_a_mask_bit_8; // @[Misc.scala:210:26, 
:214:27] wire _atomics_a_mask_acc_T_135 = atomics_a_mask_size_8 & atomics_a_mask_eq_135; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_135 = atomics_a_mask_sub_3_1_8 | _atomics_a_mask_acc_T_135; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_136 = atomics_a_mask_sub_4_2_8 & atomics_a_mask_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_136 = atomics_a_mask_size_8 & atomics_a_mask_eq_136; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_136 = atomics_a_mask_sub_4_1_8 | _atomics_a_mask_acc_T_136; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_137 = atomics_a_mask_sub_4_2_8 & atomics_a_mask_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_137 = atomics_a_mask_size_8 & atomics_a_mask_eq_137; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_137 = atomics_a_mask_sub_4_1_8 | _atomics_a_mask_acc_T_137; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_138 = atomics_a_mask_sub_5_2_8 & atomics_a_mask_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_138 = atomics_a_mask_size_8 & atomics_a_mask_eq_138; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_138 = atomics_a_mask_sub_5_1_8 | _atomics_a_mask_acc_T_138; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_139 = atomics_a_mask_sub_5_2_8 & atomics_a_mask_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_139 = atomics_a_mask_size_8 & atomics_a_mask_eq_139; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_139 = atomics_a_mask_sub_5_1_8 | _atomics_a_mask_acc_T_139; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_140 = atomics_a_mask_sub_6_2_8 & atomics_a_mask_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_140 = atomics_a_mask_size_8 & atomics_a_mask_eq_140; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_140 = atomics_a_mask_sub_6_1_8 | _atomics_a_mask_acc_T_140; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_141 = atomics_a_mask_sub_6_2_8 & atomics_a_mask_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_141 = atomics_a_mask_size_8 & atomics_a_mask_eq_141; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_141 = atomics_a_mask_sub_6_1_8 | _atomics_a_mask_acc_T_141; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_142 = atomics_a_mask_sub_7_2_8 & atomics_a_mask_nbit_8; // @[Misc.scala:211:20, :214:27] wire _atomics_a_mask_acc_T_142 = atomics_a_mask_size_8 & atomics_a_mask_eq_142; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_142 = atomics_a_mask_sub_7_1_8 | _atomics_a_mask_acc_T_142; // @[Misc.scala:215:{29,38}] wire atomics_a_mask_eq_143 = atomics_a_mask_sub_7_2_8 & atomics_a_mask_bit_8; // @[Misc.scala:210:26, :214:27] wire _atomics_a_mask_acc_T_143 = atomics_a_mask_size_8 & atomics_a_mask_eq_143; // @[Misc.scala:209:26, :214:27, :215:38] wire atomics_a_mask_acc_143 = atomics_a_mask_sub_7_1_8 | _atomics_a_mask_acc_T_143; // @[Misc.scala:215:{29,38}] wire [1:0] atomics_a_mask_lo_lo_lo_8 = {atomics_a_mask_acc_129, atomics_a_mask_acc_128}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_lo_hi_8 = {atomics_a_mask_acc_131, atomics_a_mask_acc_130}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_lo_8 = {atomics_a_mask_lo_lo_hi_8, atomics_a_mask_lo_lo_lo_8}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_lo_hi_lo_8 = {atomics_a_mask_acc_133, atomics_a_mask_acc_132}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_lo_hi_hi_8 = 
{atomics_a_mask_acc_135, atomics_a_mask_acc_134}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_lo_hi_8 = {atomics_a_mask_lo_hi_hi_8, atomics_a_mask_lo_hi_lo_8}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_lo_8 = {atomics_a_mask_lo_hi_8, atomics_a_mask_lo_lo_8}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_lo_lo_8 = {atomics_a_mask_acc_137, atomics_a_mask_acc_136}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_lo_hi_8 = {atomics_a_mask_acc_139, atomics_a_mask_acc_138}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_lo_8 = {atomics_a_mask_hi_lo_hi_8, atomics_a_mask_hi_lo_lo_8}; // @[Misc.scala:222:10] wire [1:0] atomics_a_mask_hi_hi_lo_8 = {atomics_a_mask_acc_141, atomics_a_mask_acc_140}; // @[Misc.scala:215:29, :222:10] wire [1:0] atomics_a_mask_hi_hi_hi_8 = {atomics_a_mask_acc_143, atomics_a_mask_acc_142}; // @[Misc.scala:215:29, :222:10] wire [3:0] atomics_a_mask_hi_hi_8 = {atomics_a_mask_hi_hi_hi_8, atomics_a_mask_hi_hi_lo_8}; // @[Misc.scala:222:10] wire [7:0] atomics_a_mask_hi_8 = {atomics_a_mask_hi_hi_8, atomics_a_mask_hi_lo_8}; // @[Misc.scala:222:10] assign _atomics_a_mask_T_8 = {atomics_a_mask_hi_8, atomics_a_mask_lo_8}; // @[Misc.scala:222:10] assign atomics_a_8_mask = _atomics_a_mask_T_8; // @[Misc.scala:222:10] wire _T_17 = req_uop_mem_cmd == 5'h4; // @[mshrs.scala:421:16, :439:75] wire _atomics_T; // @[mshrs.scala:439:75] assign _atomics_T = _T_17; // @[mshrs.scala:439:75] wire _io_mem_access_bits_T; // @[package.scala:16:47] assign _io_mem_access_bits_T = _T_17; // @[package.scala:16:47] wire _io_mem_access_bits_T_24; // @[package.scala:16:47] assign _io_mem_access_bits_T_24 = _T_17; // @[package.scala:16:47] wire _send_resp_T_7; // @[package.scala:16:47] assign _send_resp_T_7 = _T_17; // @[package.scala:16:47] wire [2:0] _GEN_9 = _atomics_T ? 3'h3 : 3'h0; // @[mshrs.scala:439:75] wire [2:0] _atomics_T_1_opcode; // @[mshrs.scala:439:75] assign _atomics_T_1_opcode = _GEN_9; // @[mshrs.scala:439:75] wire [2:0] _atomics_T_1_param; // @[mshrs.scala:439:75] assign _atomics_T_1_param = _GEN_9; // @[mshrs.scala:439:75] wire [3:0] _atomics_T_1_size = _atomics_T ? atomics_a_size : 4'h0; // @[Edges.scala:534:17] wire [2:0] _atomics_T_1_source = _atomics_T ? 3'h5 : 3'h0; // @[mshrs.scala:439:75] wire [31:0] _atomics_T_1_address = _atomics_T ? atomics_a_address : 32'h0; // @[Edges.scala:534:17] wire [15:0] _atomics_T_1_mask = _atomics_T ? atomics_a_mask : 16'h0; // @[Edges.scala:534:17] wire [127:0] _atomics_T_1_data = _atomics_T ? atomics_a_data : 128'h0; // @[Edges.scala:534:17] wire _T_18 = req_uop_mem_cmd == 5'h9; // @[mshrs.scala:421:16, :439:75] wire _atomics_T_2; // @[mshrs.scala:439:75] assign _atomics_T_2 = _T_18; // @[mshrs.scala:439:75] wire _io_mem_access_bits_T_1; // @[package.scala:16:47] assign _io_mem_access_bits_T_1 = _T_18; // @[package.scala:16:47] wire _io_mem_access_bits_T_25; // @[package.scala:16:47] assign _io_mem_access_bits_T_25 = _T_18; // @[package.scala:16:47] wire _send_resp_T_8; // @[package.scala:16:47] assign _send_resp_T_8 = _T_18; // @[package.scala:16:47] wire [2:0] _atomics_T_3_opcode = _atomics_T_2 ? 3'h3 : _atomics_T_1_opcode; // @[mshrs.scala:439:75] wire [2:0] _atomics_T_3_param = _atomics_T_2 ? 3'h0 : _atomics_T_1_param; // @[mshrs.scala:439:75] wire [3:0] _atomics_T_3_size = _atomics_T_2 ? atomics_a_1_size : _atomics_T_1_size; // @[Edges.scala:534:17] wire [2:0] _atomics_T_3_source = _atomics_T_2 ? 
3'h5 : _atomics_T_1_source; // @[mshrs.scala:439:75] wire [31:0] _atomics_T_3_address = _atomics_T_2 ? atomics_a_1_address : _atomics_T_1_address; // @[Edges.scala:534:17] wire [15:0] _atomics_T_3_mask = _atomics_T_2 ? atomics_a_1_mask : _atomics_T_1_mask; // @[Edges.scala:534:17] wire [127:0] _atomics_T_3_data = _atomics_T_2 ? atomics_a_1_data : _atomics_T_1_data; // @[Edges.scala:534:17] wire _T_19 = req_uop_mem_cmd == 5'hA; // @[mshrs.scala:421:16, :439:75] wire _atomics_T_4; // @[mshrs.scala:439:75] assign _atomics_T_4 = _T_19; // @[mshrs.scala:439:75] wire _io_mem_access_bits_T_2; // @[package.scala:16:47] assign _io_mem_access_bits_T_2 = _T_19; // @[package.scala:16:47] wire _io_mem_access_bits_T_26; // @[package.scala:16:47] assign _io_mem_access_bits_T_26 = _T_19; // @[package.scala:16:47] wire _send_resp_T_9; // @[package.scala:16:47] assign _send_resp_T_9 = _T_19; // @[package.scala:16:47] wire [2:0] _atomics_T_5_opcode = _atomics_T_4 ? 3'h3 : _atomics_T_3_opcode; // @[mshrs.scala:439:75] wire [2:0] _atomics_T_5_param = _atomics_T_4 ? 3'h1 : _atomics_T_3_param; // @[mshrs.scala:439:75] wire [3:0] _atomics_T_5_size = _atomics_T_4 ? atomics_a_2_size : _atomics_T_3_size; // @[Edges.scala:534:17] wire [2:0] _atomics_T_5_source = _atomics_T_4 ? 3'h5 : _atomics_T_3_source; // @[mshrs.scala:439:75] wire [31:0] _atomics_T_5_address = _atomics_T_4 ? atomics_a_2_address : _atomics_T_3_address; // @[Edges.scala:534:17] wire [15:0] _atomics_T_5_mask = _atomics_T_4 ? atomics_a_2_mask : _atomics_T_3_mask; // @[Edges.scala:534:17] wire [127:0] _atomics_T_5_data = _atomics_T_4 ? atomics_a_2_data : _atomics_T_3_data; // @[Edges.scala:534:17] wire _T_20 = req_uop_mem_cmd == 5'hB; // @[mshrs.scala:421:16, :439:75] wire _atomics_T_6; // @[mshrs.scala:439:75] assign _atomics_T_6 = _T_20; // @[mshrs.scala:439:75] wire _io_mem_access_bits_T_3; // @[package.scala:16:47] assign _io_mem_access_bits_T_3 = _T_20; // @[package.scala:16:47] wire _io_mem_access_bits_T_27; // @[package.scala:16:47] assign _io_mem_access_bits_T_27 = _T_20; // @[package.scala:16:47] wire _send_resp_T_10; // @[package.scala:16:47] assign _send_resp_T_10 = _T_20; // @[package.scala:16:47] wire [2:0] _atomics_T_7_opcode = _atomics_T_6 ? 3'h3 : _atomics_T_5_opcode; // @[mshrs.scala:439:75] wire [2:0] _atomics_T_7_param = _atomics_T_6 ? 3'h2 : _atomics_T_5_param; // @[mshrs.scala:439:75] wire [3:0] _atomics_T_7_size = _atomics_T_6 ? atomics_a_3_size : _atomics_T_5_size; // @[Edges.scala:534:17] wire [2:0] _atomics_T_7_source = _atomics_T_6 ? 3'h5 : _atomics_T_5_source; // @[mshrs.scala:439:75] wire [31:0] _atomics_T_7_address = _atomics_T_6 ? atomics_a_3_address : _atomics_T_5_address; // @[Edges.scala:534:17] wire [15:0] _atomics_T_7_mask = _atomics_T_6 ? atomics_a_3_mask : _atomics_T_5_mask; // @[Edges.scala:534:17] wire [127:0] _atomics_T_7_data = _atomics_T_6 ? atomics_a_3_data : _atomics_T_5_data; // @[Edges.scala:534:17] wire _T_24 = req_uop_mem_cmd == 5'h8; // @[mshrs.scala:421:16, :439:75] wire _atomics_T_8; // @[mshrs.scala:439:75] assign _atomics_T_8 = _T_24; // @[mshrs.scala:439:75] wire _io_mem_access_bits_T_7; // @[package.scala:16:47] assign _io_mem_access_bits_T_7 = _T_24; // @[package.scala:16:47] wire _io_mem_access_bits_T_31; // @[package.scala:16:47] assign _io_mem_access_bits_T_31 = _T_24; // @[package.scala:16:47] wire _send_resp_T_14; // @[package.scala:16:47] assign _send_resp_T_14 = _T_24; // @[package.scala:16:47] wire [2:0] _atomics_T_9_opcode = _atomics_T_8 ? 
3'h2 : _atomics_T_7_opcode; // @[mshrs.scala:439:75] wire [2:0] _atomics_T_9_param = _atomics_T_8 ? 3'h4 : _atomics_T_7_param; // @[mshrs.scala:439:75] wire [3:0] _atomics_T_9_size = _atomics_T_8 ? atomics_a_4_size : _atomics_T_7_size; // @[Edges.scala:517:17] wire [2:0] _atomics_T_9_source = _atomics_T_8 ? 3'h5 : _atomics_T_7_source; // @[mshrs.scala:439:75] wire [31:0] _atomics_T_9_address = _atomics_T_8 ? atomics_a_4_address : _atomics_T_7_address; // @[Edges.scala:517:17] wire [15:0] _atomics_T_9_mask = _atomics_T_8 ? atomics_a_4_mask : _atomics_T_7_mask; // @[Edges.scala:517:17] wire [127:0] _atomics_T_9_data = _atomics_T_8 ? atomics_a_4_data : _atomics_T_7_data; // @[Edges.scala:517:17] wire _T_25 = req_uop_mem_cmd == 5'hC; // @[mshrs.scala:421:16, :439:75] wire _atomics_T_10; // @[mshrs.scala:439:75] assign _atomics_T_10 = _T_25; // @[mshrs.scala:439:75] wire _io_mem_access_bits_T_8; // @[package.scala:16:47] assign _io_mem_access_bits_T_8 = _T_25; // @[package.scala:16:47] wire _io_mem_access_bits_T_32; // @[package.scala:16:47] assign _io_mem_access_bits_T_32 = _T_25; // @[package.scala:16:47] wire _send_resp_T_15; // @[package.scala:16:47] assign _send_resp_T_15 = _T_25; // @[package.scala:16:47] wire [2:0] _atomics_T_11_opcode = _atomics_T_10 ? 3'h2 : _atomics_T_9_opcode; // @[mshrs.scala:439:75] wire [2:0] _atomics_T_11_param = _atomics_T_10 ? 3'h0 : _atomics_T_9_param; // @[mshrs.scala:439:75] wire [3:0] _atomics_T_11_size = _atomics_T_10 ? atomics_a_5_size : _atomics_T_9_size; // @[Edges.scala:517:17] wire [2:0] _atomics_T_11_source = _atomics_T_10 ? 3'h5 : _atomics_T_9_source; // @[mshrs.scala:439:75] wire [31:0] _atomics_T_11_address = _atomics_T_10 ? atomics_a_5_address : _atomics_T_9_address; // @[Edges.scala:517:17] wire [15:0] _atomics_T_11_mask = _atomics_T_10 ? atomics_a_5_mask : _atomics_T_9_mask; // @[Edges.scala:517:17] wire [127:0] _atomics_T_11_data = _atomics_T_10 ? atomics_a_5_data : _atomics_T_9_data; // @[Edges.scala:517:17] wire _T_26 = req_uop_mem_cmd == 5'hD; // @[mshrs.scala:421:16, :439:75] wire _atomics_T_12; // @[mshrs.scala:439:75] assign _atomics_T_12 = _T_26; // @[mshrs.scala:439:75] wire _io_mem_access_bits_T_9; // @[package.scala:16:47] assign _io_mem_access_bits_T_9 = _T_26; // @[package.scala:16:47] wire _io_mem_access_bits_T_33; // @[package.scala:16:47] assign _io_mem_access_bits_T_33 = _T_26; // @[package.scala:16:47] wire _send_resp_T_16; // @[package.scala:16:47] assign _send_resp_T_16 = _T_26; // @[package.scala:16:47] wire [2:0] _atomics_T_13_opcode = _atomics_T_12 ? 3'h2 : _atomics_T_11_opcode; // @[mshrs.scala:439:75] wire [2:0] _atomics_T_13_param = _atomics_T_12 ? 3'h1 : _atomics_T_11_param; // @[mshrs.scala:439:75] wire [3:0] _atomics_T_13_size = _atomics_T_12 ? atomics_a_6_size : _atomics_T_11_size; // @[Edges.scala:517:17] wire [2:0] _atomics_T_13_source = _atomics_T_12 ? 3'h5 : _atomics_T_11_source; // @[mshrs.scala:439:75] wire [31:0] _atomics_T_13_address = _atomics_T_12 ? atomics_a_6_address : _atomics_T_11_address; // @[Edges.scala:517:17] wire [15:0] _atomics_T_13_mask = _atomics_T_12 ? atomics_a_6_mask : _atomics_T_11_mask; // @[Edges.scala:517:17] wire [127:0] _atomics_T_13_data = _atomics_T_12 ? 
atomics_a_6_data : _atomics_T_11_data; // @[Edges.scala:517:17] wire _T_27 = req_uop_mem_cmd == 5'hE; // @[mshrs.scala:421:16, :439:75] wire _atomics_T_14; // @[mshrs.scala:439:75] assign _atomics_T_14 = _T_27; // @[mshrs.scala:439:75] wire _io_mem_access_bits_T_10; // @[package.scala:16:47] assign _io_mem_access_bits_T_10 = _T_27; // @[package.scala:16:47] wire _io_mem_access_bits_T_34; // @[package.scala:16:47] assign _io_mem_access_bits_T_34 = _T_27; // @[package.scala:16:47] wire _send_resp_T_17; // @[package.scala:16:47] assign _send_resp_T_17 = _T_27; // @[package.scala:16:47] wire [2:0] _atomics_T_15_opcode = _atomics_T_14 ? 3'h2 : _atomics_T_13_opcode; // @[mshrs.scala:439:75] wire [2:0] _atomics_T_15_param = _atomics_T_14 ? 3'h2 : _atomics_T_13_param; // @[mshrs.scala:439:75] wire [3:0] _atomics_T_15_size = _atomics_T_14 ? atomics_a_7_size : _atomics_T_13_size; // @[Edges.scala:517:17] wire [2:0] _atomics_T_15_source = _atomics_T_14 ? 3'h5 : _atomics_T_13_source; // @[mshrs.scala:439:75] wire [31:0] _atomics_T_15_address = _atomics_T_14 ? atomics_a_7_address : _atomics_T_13_address; // @[Edges.scala:517:17] wire [15:0] _atomics_T_15_mask = _atomics_T_14 ? atomics_a_7_mask : _atomics_T_13_mask; // @[Edges.scala:517:17] wire [127:0] _atomics_T_15_data = _atomics_T_14 ? atomics_a_7_data : _atomics_T_13_data; // @[Edges.scala:517:17] wire _T_28 = req_uop_mem_cmd == 5'hF; // @[mshrs.scala:421:16, :439:75] wire _atomics_T_16; // @[mshrs.scala:439:75] assign _atomics_T_16 = _T_28; // @[mshrs.scala:439:75] wire _io_mem_access_bits_T_11; // @[package.scala:16:47] assign _io_mem_access_bits_T_11 = _T_28; // @[package.scala:16:47] wire _io_mem_access_bits_T_35; // @[package.scala:16:47] assign _io_mem_access_bits_T_35 = _T_28; // @[package.scala:16:47] wire _send_resp_T_18; // @[package.scala:16:47] assign _send_resp_T_18 = _T_28; // @[package.scala:16:47] wire [2:0] atomics_opcode = _atomics_T_16 ? 3'h2 : _atomics_T_15_opcode; // @[mshrs.scala:439:75] wire [2:0] atomics_param = _atomics_T_16 ? 3'h3 : _atomics_T_15_param; // @[mshrs.scala:439:75] wire [3:0] atomics_size = _atomics_T_16 ? atomics_a_8_size : _atomics_T_15_size; // @[Edges.scala:517:17] wire [2:0] atomics_source = _atomics_T_16 ? 3'h5 : _atomics_T_15_source; // @[mshrs.scala:439:75] wire [31:0] atomics_address = _atomics_T_16 ? atomics_a_8_address : _atomics_T_15_address; // @[Edges.scala:517:17] wire [15:0] atomics_mask = _atomics_T_16 ? atomics_a_8_mask : _atomics_T_15_mask; // @[Edges.scala:517:17] wire [127:0] atomics_data = _atomics_T_16 ? atomics_a_8_data : _atomics_T_15_data; // @[Edges.scala:517:17]
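The _atomics_T_* cascade above is a priority-mux chain keyed on req_uop_mem_cmd: each compare (5'h4 SWAP, 5'h9 XOR, 5'hA OR, 5'hB AND, 5'h8 ADD, 5'hC MIN, 5'hD MAX, 5'hE MINU, 5'hF MAXU) overrides the running selection with the matching pre-built TileLink A-channel beat (opcode 3'h3 for LogicalData, 3'h2 for ArithmeticData), and unmatched commands fall through to all-zero defaults. The following is a minimal, self-contained Chisel sketch of that selection pattern only; the module, table, and field names are illustrative assumptions, not the source behind the @[mshrs.scala:439:75] annotations.

import chisel3._

// Illustrative sketch: mirrors the shape of the _atomics_T_* cascade emitted above.
class AtomicOp extends Bundle {
  val opcode = UInt(3.W)
  val param  = UInt(3.W)
}

class AtomicSelect extends Module {
  val io = IO(new Bundle {
    val cmd = Input(UInt(5.W))      // memory command (M_XA_* style encoding assumed)
    val sel = Output(new AtomicOp)  // selected TileLink opcode/param pair
  })

  // (mem_cmd, TileLink opcode, atomic param); constants copied from the Verilog above
  val table = Seq(
    (0x4, 3, 3), // SWAP -> LogicalData
    (0x9, 3, 0), // XOR  -> LogicalData
    (0xA, 3, 1), // OR   -> LogicalData
    (0xB, 3, 2), // AND  -> LogicalData
    (0x8, 2, 4), // ADD  -> ArithmeticData
    (0xC, 2, 0), // MIN  -> ArithmeticData
    (0xD, 2, 1), // MAX  -> ArithmeticData
    (0xE, 2, 2), // MINU -> ArithmeticData
    (0xF, 2, 3)) // MAXU -> ArithmeticData

  // Fold into a chain of Mux'es: each entry overrides the running selection when its
  // command matches, analogous to _atomics_T_1 ... _atomics_T_15 feeding atomics_opcode.
  val default = Wire(new AtomicOp)
  default.opcode := 0.U
  default.param  := 0.U
  io.sel := table.foldLeft(default) { case (prev, (c, op, pa)) =>
    val hit  = io.cmd === c.U
    val next = Wire(new AtomicOp)
    next.opcode := Mux(hit, op.U, prev.opcode)
    next.param  := Mux(hit, pa.U, prev.param)
    next
  }
}

Because the command encodings are mutually exclusive, the order of the chain does not change the selected value; the generated Verilog simply reflects the textual order of the comparisons.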
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_57( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] output io_q // @[ShiftReg.scala:36:14] ); wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire io_d = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41] wire _output_T_1 = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_81 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
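In the wrapper above, generated from the AsyncResetSynchronizerShiftReg class in the listing, the module casts reset to AsyncReset and forwards it with the clock into one per-bit primitive chain (output_chain); io_d appears as a constant 1'h1 because the caller tied the input high at elaboration. In Chisel source this wrapper is normally reached through the companion object's apply. A minimal usage sketch follows; the IrqSync module and its signal names are assumptions, and only the AsyncResetSynchronizerShiftReg call reflects the listed API.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class IrqSync extends Module {
  val io = IO(new Bundle {
    val irq_async = Input(Bool())   // level signal arriving from another clock domain
    val irq_sync  = Output(Bool())  // same signal, synchronized to this module's clock
  })
  // 3-deep, asynchronously reset synchronizer with reset value 0 and a suggested name;
  // elaborates to a wrapper like AsyncResetSynchronizerShiftReg_w1_d3_i0 above.
  io.irq_sync := AsyncResetSynchronizerShiftReg(io.irq_async, sync = 3, init = 0,
                                                name = Some("irq_sync_reg"))
}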
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_157( // @[SynchronizerReg.scala:68:19] input clock, // @[SynchronizerReg.scala:68:19] input reset, // @[SynchronizerReg.scala:68:19] output io_q // @[ShiftReg.scala:36:14] ); wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19] wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19] wire io_q_0; // @[SynchronizerReg.scala:68:19] reg sync_0; // @[SynchronizerReg.scala:51:87] assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19] reg sync_1; // @[SynchronizerReg.scala:51:87] reg sync_2; // @[SynchronizerReg.scala:51:87] always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19] if (reset) begin // @[SynchronizerReg.scala:68:19] sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87] end else begin // @[SynchronizerReg.scala:68:19] sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87] sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87] sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19] end end // @[SynchronizerReg.scala:68:19] assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19] endmodule
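This primitive is the per-bit chain the wrapper instantiates: three flops reset asynchronously to 0 that shift toward io_q, with the data input specialized to a constant 1'h1 in this particular instance. A standalone behavioral sketch of the same structure, assuming a generic Bool input instead of the tied-high one, is shown below; AsyncSync3 is a hypothetical name, not part of the generated hierarchy.

import chisel3._

// Async-reset 3-stage synchronizer: behaviorally equivalent to the generated
// AsyncResetSynchronizerPrimitiveShiftReg_d3_i0 above, with io.d left generic.
class AsyncSync3 extends Module with RequireAsyncReset {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  val sync_2 = RegNext(io.d,   false.B)  // chain.last captures the incoming signal
  val sync_1 = RegNext(sync_2, false.B)
  val sync_0 = RegNext(sync_1, false.B)  // chain.head drives the output
  io.q := sync_0
}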
Generate the Verilog code corresponding to the following Chisel files. File MSHR.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import freechips.rocketchip.tilelink._ import TLPermissions._ import TLMessages._ import MetaData._ import chisel3.PrintableHelper import chisel3.experimental.dataview._ class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val a = Valid(new SourceARequest(params)) val b = Valid(new SourceBRequest(params)) val c = Valid(new SourceCRequest(params)) val d = Valid(new SourceDRequest(params)) val e = Valid(new SourceERequest(params)) val x = Valid(new SourceXRequest(params)) val dir = Valid(new DirectoryWrite(params)) val reload = Bool() // get next request via allocate (if any) } class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val way = UInt(params.wayBits.W) val blockB = Bool() val nestB = Bool() val blockC = Bool() val nestC = Bool() } class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val b_toN = Bool() // nested Probes may unhit us val b_toB = Bool() // nested Probes may demote us val b_clr_dirty = Bool() // nested Probes clear dirty val c_set_dirty = Bool() // nested Releases MAY set dirty } sealed trait CacheState { val code = CacheState.index.U CacheState.index = CacheState.index + 1 } object CacheState { var index = 0 } case object S_INVALID extends CacheState case object S_BRANCH extends CacheState case object S_BRANCH_C extends CacheState case object S_TIP extends CacheState case object S_TIP_C extends CacheState case object S_TIP_CD extends CacheState case object S_TIP_D extends CacheState case object S_TRUNK_C extends CacheState case object S_TRUNK_CD extends CacheState class MSHR(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup val status = Valid(new MSHRStatus(params)) val schedule = Decoupled(new ScheduleRequest(params)) val sinkc = Flipped(Valid(new SinkCResponse(params))) val sinkd = Flipped(Valid(new SinkDResponse(params))) val sinke = Flipped(Valid(new SinkEResponse(params))) val nestedwb = Flipped(new NestedWriteback(params)) }) val request_valid = RegInit(false.B) val request = Reg(new FullRequest(params)) val meta_valid = RegInit(false.B) val meta = Reg(new DirectoryResult(params)) // Define which states are valid when (meta_valid) { when (meta.state === INVALID) { assert (!meta.clients.orR) assert (!meta.dirty) } when (meta.state === BRANCH) { assert (!meta.dirty) } when 
(meta.state === TRUNK) { assert (meta.clients.orR) assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one } when (meta.state === TIP) { // noop } } // Completed transitions (s_ = scheduled), (w_ = waiting) val s_rprobe = RegInit(true.B) // B val w_rprobeackfirst = RegInit(true.B) val w_rprobeacklast = RegInit(true.B) val s_release = RegInit(true.B) // CW w_rprobeackfirst val w_releaseack = RegInit(true.B) val s_pprobe = RegInit(true.B) // B val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1] val s_flush = RegInit(true.B) // X w_releaseack val w_grantfirst = RegInit(true.B) val w_grantlast = RegInit(true.B) val w_grant = RegInit(true.B) // first | last depending on wormhole val w_pprobeackfirst = RegInit(true.B) val w_pprobeacklast = RegInit(true.B) val w_pprobeack = RegInit(true.B) // first | last depending on wormhole val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*) val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD val s_execute = RegInit(true.B) // D w_pprobeack, w_grant val w_grantack = RegInit(true.B) val s_writeback = RegInit(true.B) // W w_* // [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall) // However, inB and outC are higher priority than outB, so s_release and s_pprobe // may be safely issued while blockB. Thus we must NOT try to schedule the // potentially stuck s_acquire with either of them (scheduler is all or none). // Meta-data that we discover underway val sink = Reg(UInt(params.outer.bundle.sinkBits.W)) val gotT = Reg(Bool()) val bad_grant = Reg(Bool()) val probes_done = Reg(UInt(params.clientBits.W)) val probes_toN = Reg(UInt(params.clientBits.W)) val probes_noT = Reg(Bool()) // When a nested transaction completes, update our meta data when (meta_valid && meta.state =/= INVALID && io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) { when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B } when (io.nestedwb.c_set_dirty) { meta.dirty := true.B } when (io.nestedwb.b_toB) { meta.state := BRANCH } when (io.nestedwb.b_toN) { meta.hit := false.B } } // Scheduler status io.status.valid := request_valid io.status.bits.set := request.set io.status.bits.tag := request.tag io.status.bits.way := meta.way io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst) io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst // The above rules ensure we will block and not nest an outer probe while still doing our // own inner probes. Thus every probe wakes exactly one MSHR. io.status.bits.blockC := !meta_valid io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst) // The w_grantfirst in nestC is necessary to deal with: // acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock // ... 
this is possible because the release+probe can be for same set, but different tag // We can only demand: block, nest, or queue assert (!io.status.bits.nestB || !io.status.bits.blockB) assert (!io.status.bits.nestC || !io.status.bits.blockC) // Scheduler requests val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe io.schedule.bits.b.valid := !s_rprobe || !s_pprobe io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst) io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant io.schedule.bits.e.valid := !s_grantack && w_grantfirst io.schedule.bits.x.valid := !s_flush && w_releaseack io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait) io.schedule.bits.reload := no_wait io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid || io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid || io.schedule.bits.dir.valid // Schedule completions when (io.schedule.ready) { s_rprobe := true.B when (w_rprobeackfirst) { s_release := true.B } s_pprobe := true.B when (s_release && s_pprobe) { s_acquire := true.B } when (w_releaseack) { s_flush := true.B } when (w_pprobeackfirst) { s_probeack := true.B } when (w_grantfirst) { s_grantack := true.B } when (w_pprobeack && w_grant) { s_execute := true.B } when (no_wait) { s_writeback := true.B } // Await the next operation when (no_wait) { request_valid := false.B meta_valid := false.B } } // Resulting meta-data val final_meta_writeback = WireInit(meta) val req_clientBit = params.clientBit(request.source) val req_needT = needT(request.opcode, request.param) val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm val meta_no_clients = !meta.clients.orR val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT) when (request.prio(2) && (!params.firstLevel).B) { // always a hit final_meta_writeback.dirty := meta.dirty || request.opcode(0) final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state) final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U) final_meta_writeback.hit := true.B // chained requests are hits } .elsewhen (request.control && params.control.B) { // request.prio(0) when (meta.hit) { final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := meta.clients & ~probes_toN } final_meta_writeback.hit := false.B } .otherwise { final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2) final_meta_writeback.state := Mux(req_needT, Mux(req_acquire, TRUNK, TIP), Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH), MuxLookup(meta.state, 0.U(2.W))(Seq( INVALID -> BRANCH, BRANCH -> BRANCH, TRUNK -> TIP, TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP))))) final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) | Mux(req_acquire, req_clientBit, 0.U) final_meta_writeback.tag := request.tag final_meta_writeback.hit := true.B } when (bad_grant) { when (meta.hit) { // upgrade failed (B -> T) assert (!meta_valid || meta.state === BRANCH) final_meta_writeback.hit := true.B final_meta_writeback.dirty := false.B final_meta_writeback.state := BRANCH final_meta_writeback.clients := meta.clients & ~probes_toN } .otherwise { // failed N -> (T or B) 
final_meta_writeback.hit := false.B final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := 0.U } } val invalid = Wire(new DirectoryEntry(params)) invalid.dirty := false.B invalid.state := INVALID invalid.clients := 0.U invalid.tag := 0.U // Just because a client says BtoT, by the time we process the request he may be N. // Therefore, we must consult our own meta-data state to confirm he owns the line still. val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR // The client asking us to act is proof they don't have permissions. val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U) io.schedule.bits.a.bits.tag := request.tag io.schedule.bits.a.bits.set := request.set io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB) io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U || !(request.opcode === PutFullData || request.opcode === AcquirePerm) io.schedule.bits.a.bits.source := 0.U io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB))) io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag) io.schedule.bits.b.bits.set := request.set io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release) io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN) io.schedule.bits.c.bits.source := 0.U io.schedule.bits.c.bits.tag := meta.tag io.schedule.bits.c.bits.set := request.set io.schedule.bits.c.bits.way := meta.way io.schedule.bits.c.bits.dirty := meta.dirty io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param, MuxLookup(request.param, request.param)(Seq( NtoB -> Mux(req_promoteT, NtoT, NtoB), BtoT -> Mux(honour_BtoT, BtoT, NtoT), NtoT -> NtoT))) io.schedule.bits.d.bits.sink := 0.U io.schedule.bits.d.bits.way := meta.way io.schedule.bits.d.bits.bad := bad_grant io.schedule.bits.e.bits.sink := sink io.schedule.bits.x.bits.fail := false.B io.schedule.bits.dir.bits.set := request.set io.schedule.bits.dir.bits.way := meta.way io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback)) // Coverage of state transitions def cacheState(entry: DirectoryEntry, hit: Bool) = { val out = WireDefault(0.U) val c = entry.clients.orR val d = entry.dirty switch (entry.state) { is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) } is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) } is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) } is (INVALID) { out := S_INVALID.code } } when (!hit) { out := S_INVALID.code } out } val p = !params.lastLevel // can be probed val c = !params.firstLevel // can be acquired val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read) val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist val f = params.control // flush control register exists val cfg = (p, c, m, r, f) val b = r || p // can reach branch state (via probe downgrade or read-only device) // The cache must be used for something or we would not be here require(c || m) val evict = cacheState(meta, !meta.hit) val before = cacheState(meta, meta.hit) val after = cacheState(final_meta_writeback, 
true.B) def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}") } else { assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}") } if (cover && f) { params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}") } else { assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}") } } def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}") } else { assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}") } } when ((!s_release && w_rprobeackfirst) && io.schedule.ready) { eviction(S_BRANCH, b) // MMIO read to read-only device eviction(S_BRANCH_C, b && c) // you need children to become C eviction(S_TIP, true) // MMIO read || clean release can lead to this state eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_D, true) // MMIO write || dirty release lead here eviction(S_TRUNK_C, c) // acquire for write eviction(S_TRUNK_CD, c) // dirty release then reacquire } when ((!s_writeback && no_wait) && io.schedule.ready) { transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches transition(S_INVALID, S_TIP, m) // MMIO read transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_INVALID, S_TIP_D, m) // MMIO write transition(S_INVALID, S_TRUNK_C, c) // acquire transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions) transition(S_BRANCH, S_BRANCH_C, b && c) // acquire transition(S_BRANCH, S_TIP, b && m) // prefetch write transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_TIP_D, b && m) // MMIO write transition(S_BRANCH, S_TRUNK_C, b && c) // acquire transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH_C, S_INVALID, b && c && p) transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional) transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_TIP, S_INVALID, p) transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write transition(S_TIP, 
S_TIP_CD, false) // acquire does not make us dirty immediately transition(S_TIP, S_TRUNK_C, c) // acquire transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately transition(S_TIP_C, S_INVALID, c && p) transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional) transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_TIP_C, S_TRUNK_C, c) // acquire transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty transition(S_TIP_D, S_INVALID, p) transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired transition(S_TIP_D, S_TRUNK_CD, c) // acquire transition(S_TIP_CD, S_INVALID, c && p) transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional) transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire transition(S_TIP_CD, S_TRUNK_CD, c) // acquire transition(S_TRUNK_C, S_INVALID, c && p) transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional) transition(S_TRUNK_C, S_TIP_C, c) // bounce shared transition(S_TRUNK_C, S_TIP_D, c) // dirty release transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce transition(S_TRUNK_CD, S_INVALID, c && p) transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TRUNK_CD, S_TIP_D, c) // dirty release transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire } // Handle response messages val probe_bit = params.clientBit(io.sinkc.bits.source) val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client) val probe_toN = isToN(io.sinkc.bits.param) if (!params.firstLevel) when (io.sinkc.valid) { params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B") params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B") // Caution: the probe matches us only in set. // We would never allow an outer probe to nest until both w_[rp]probeack complete, so // it is safe to just unguardedly update the probe FSM. 
probes_done := probes_done | probe_bit probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U) probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT w_rprobeackfirst := w_rprobeackfirst || last_probe w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last) w_pprobeackfirst := w_pprobeackfirst || last_probe w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last) // Allow wormhole routing from sinkC if the first request beat has offset 0 val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U) w_pprobeack := w_pprobeack || set_pprobeack params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data") params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data") // However, meta-data updates need to be done more cautiously when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!! } when (io.sinkd.valid) { when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) { sink := io.sinkd.bits.sink w_grantfirst := true.B w_grantlast := io.sinkd.bits.last // Record if we need to prevent taking ownership bad_grant := io.sinkd.bits.denied // Allow wormhole routing for requests whose first beat has offset 0 w_grant := request.offset === 0.U || io.sinkd.bits.last params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data") params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data") gotT := io.sinkd.bits.param === toT } .elsewhen (io.sinkd.bits.opcode === ReleaseAck) { w_releaseack := true.B } } when (io.sinke.valid) { w_grantack := true.B } // Bootstrap new requests val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits) val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits) val new_request = Mux(io.allocate.valid, allocate_as_full, request) val new_needT = needT(new_request.opcode, new_request.param) val new_clientBit = params.clientBit(new_request.source) val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U) val prior = cacheState(final_meta_writeback, true.B) def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}") } else { assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}") } } when (io.allocate.valid && io.allocate.bits.repeat) { bypass(S_INVALID, f || p) // Can lose permissions (probe/flush) bypass(S_BRANCH, b) // MMIO read to read-only device bypass(S_BRANCH_C, b && c) // you need children to become C bypass(S_TIP, true) // MMIO read || clean release can lead to this state bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_D, true) // MMIO write || dirty release lead here bypass(S_TRUNK_C, c) // acquire for write bypass(S_TRUNK_CD, c) // dirty release then reacquire } when (io.allocate.valid) { assert (!request_valid || (no_wait && io.schedule.fire)) request_valid := true.B request := io.allocate.bits } // Create execution plan when (io.directory.valid || (io.allocate.valid && 
io.allocate.bits.repeat)) { meta_valid := true.B meta := new_meta probes_done := 0.U probes_toN := 0.U probes_noT := false.B gotT := false.B bad_grant := false.B // These should already be either true or turning true // We clear them here explicitly to simplify the mux tree s_rprobe := true.B w_rprobeackfirst := true.B w_rprobeacklast := true.B s_release := true.B w_releaseack := true.B s_pprobe := true.B s_acquire := true.B s_flush := true.B w_grantfirst := true.B w_grantlast := true.B w_grant := true.B w_pprobeackfirst := true.B w_pprobeacklast := true.B w_pprobeack := true.B s_probeack := true.B s_grantack := true.B s_execute := true.B w_grantack := true.B s_writeback := true.B // For C channel requests (ie: Release[Data]) when (new_request.prio(2) && (!params.firstLevel).B) { s_execute := false.B // Do we need to go dirty? when (new_request.opcode(0) && !new_meta.dirty) { s_writeback := false.B } // Does our state change? when (isToB(new_request.param) && new_meta.state === TRUNK) { s_writeback := false.B } // Do our clients change? when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) { s_writeback := false.B } assert (new_meta.hit) } // For X channel requests (ie: flush) .elsewhen (new_request.control && params.control.B) { // new_request.prio(0) s_flush := false.B // Do we need to actually do something? when (new_meta.hit) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } } // For A channel requests .otherwise { // new_request.prio(0) && !new_request.control s_execute := false.B // Do we need an eviction? when (!new_meta.hit && new_meta.state =/= INVALID) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } // Do we need an acquire? when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) { s_acquire := false.B w_grantfirst := false.B w_grantlast := false.B w_grant := false.B s_grantack := false.B s_writeback := false.B } // Do we need a probe? when ((!params.firstLevel).B && (new_meta.hit && (new_needT || new_meta.state === TRUNK) && (new_meta.clients & ~new_skipProbe) =/= 0.U)) { s_pprobe := false.B w_pprobeackfirst := false.B w_pprobeacklast := false.B w_pprobeack := false.B s_writeback := false.B } // Do we need a grantack? when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) { w_grantack := false.B s_writeback := false.B } // Becomes dirty? when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) { s_writeback := false.B } } } } File Parameters.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property.cover import scala.math.{min,max} case class CacheParameters( level: Int, ways: Int, sets: Int, blockBytes: Int, beatBytes: Int, // inner hintsSkipProbe: Boolean) { require (ways > 0) require (sets > 0) require (blockBytes > 0 && isPow2(blockBytes)) require (beatBytes > 0 && isPow2(beatBytes)) require (blockBytes >= beatBytes) val blocks = ways * sets val sizeBytes = blocks * blockBytes val blockBeats = blockBytes/beatBytes } case class InclusiveCachePortParameters( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams) { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e)) } object InclusiveCachePortParameters { val none = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.none) val full = InclusiveCachePortParameters( a = BufferParams.default, b = BufferParams.default, c = BufferParams.default, d = BufferParams.default, e = BufferParams.default) // This removes feed-through paths from C=>A and A=>C val fullC = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.default, d = BufferParams.none, e = BufferParams.none) val flowAD = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.flow, e = BufferParams.none) val flowAE = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.flow) // For innerBuf: // SinkA: no restrictions, flows into scheduler+putbuffer // SourceB: no restrictions, flows out of scheduler // sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore // SourceD: no restrictions, flows out of bankedStore/regout // SinkE: no restrictions, flows into scheduler // // ... so while none is possible, you probably want at least flowAC to cut ready // from the scheduler delay and flowD to ease SourceD back-pressure // For outerBufer: // SourceA: must not be pipe, flows out of scheduler // SinkB: no restrictions, flows into scheduler // SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored // SinkD: no restrictions, flows into scheduler & bankedStore // SourceE: must not be pipe, flows out of scheduler // // ... 
AE take the channel ready into the scheduler, so you need at least flowAE } case class InclusiveCacheMicroParameters( writeBytes: Int, // backing store update granularity memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz) portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes dirReg: Boolean = false, innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE { require (writeBytes > 0 && isPow2(writeBytes)) require (memCycles > 0) require (portFactor >= 2) // for inner RMW and concurrent outer Relase + Grant } case class InclusiveCacheControlParameters( address: BigInt, beatBytes: Int, bankedControl: Boolean) case class InclusiveCacheParameters( cache: CacheParameters, micro: InclusiveCacheMicroParameters, control: Boolean, inner: TLEdgeIn, outer: TLEdgeOut)(implicit val p: Parameters) { require (cache.ways > 1) require (cache.sets > 1 && isPow2(cache.sets)) require (micro.writeBytes <= inner.manager.beatBytes) require (micro.writeBytes <= outer.manager.beatBytes) require (inner.manager.beatBytes <= cache.blockBytes) require (outer.manager.beatBytes <= cache.blockBytes) // Require that all cached address ranges have contiguous blocks outer.manager.managers.flatMap(_.address).foreach { a => require (a.alignment >= cache.blockBytes) } // If we are the first level cache, we do not need to support inner-BCE val firstLevel = !inner.client.clients.exists(_.supports.probe) // If we are the last level cache, we do not need to support outer-B val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED) require (lastLevel) // Provision enough resources to achieve full throughput with missing single-beat accesses val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro) val secondary = max(mshrs, micro.memCycles - mshrs) val putLists = micro.memCycles // allow every request to be single beat val putBeats = max(2*cache.blockBeats, micro.memCycles) val relLists = 2 val relBeats = relLists*cache.blockBeats val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address)) val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_)) def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] = if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail) val addressMapping = bitOffsets(pickMask) val addressBits = addressMapping.size // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}") val allClients = inner.client.clients.size val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size val clientBits = max(1, clientBitsRaw) val stateBits = 2 val wayBits = log2Ceil(cache.ways) val setBits = log2Ceil(cache.sets) val offsetBits = log2Ceil(cache.blockBytes) val tagBits = addressBits - setBits - offsetBits val putBits = log2Ceil(max(putLists, relLists)) require (tagBits > 0) require (offsetBits > 0) val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1 val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1 val innerMaskBits = inner.manager.beatBytes / micro.writeBytes val outerMaskBits = outer.manager.beatBytes / micro.writeBytes def clientBit(source: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse) } } def 
clientSource(bit: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U)) } } def parseAddress(x: UInt): (UInt, UInt, UInt) = { val offset = Cat(addressMapping.map(o => x(o,o)).reverse) val set = offset >> offsetBits val tag = set >> setBits (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0)) } def widen(x: UInt, width: Int): UInt = { val y = x | 0.U(width.W) assert (y >> width === 0.U) y(width-1, 0) } def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = { val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits)) val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) } addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) } Cat(bits.reverse) } def restoreAddress(expanded: UInt): UInt = { val missingBits = flatAddresses .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match .groupBy(_._1) .view .mapValues(_.map(_._2)) val muxMask = AddressDecoder(missingBits.values.toList) val mux = missingBits.toList.map { case (bits, addrs) => val widen = addrs.map(_.widen(~muxMask)) val matches = AddressSet .unify(widen.distinct) .map(_.contains(expanded)) .reduce(_ || _) (matches, bits.U) } expanded | Mux1H(mux) } def dirReg[T <: Data](x: T, en: Bool = true.B): T = { if (micro.dirReg) RegEnable(x, en) else x } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc) } object MetaData { val stateBits = 2 def INVALID: UInt = 0.U(stateBits.W) // way is empty def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch // Does a request need trunk? def needT(opcode: UInt, param: UInt): Bool = { !opcode(2) || (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) || ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB) } // Does a request prove the client need not be probed? 
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = { // Acquire(toB) and Get => is N, so no probe // Acquire(*toT) => is N or B, but need T, so no probe // Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client // Put* => is N or B, so probe IS needed opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B) } def isToN(param: UInt): Bool = { param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN } def isToB(param: UInt): Bool = { param === TLPermissions.TtoB || param === TLPermissions.BtoB } } object InclusiveCacheParameters { val lfsrBits = 10 val L2ControlAddress = 0x2010000 val L2ControlSize = 0x1000 def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = { // We need 2-3 normal MSHRs to cover the Directory latency // To fully exploit memory bandwidth-delay-product, we need memCyles/blockBeats MSHRs max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats) } def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = // We need a dedicated MSHR for B+C each 2 + out_mshrs(cache, micro) } class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
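Before the elaborated MSHR below, note how clientBit from Parameters.scala degenerates when a single inner client supports probes (as the one-bit clients fields in the module below suggest): the Cat of per-client sourceId.contains(source) checks reduces to a single equality. A plain-Scala model of that mapping, where the id range is an assumed example chosen to line up with the request_source == 8'h40 term in the generated code:

// Plain-Scala sketch of the one-hot clientBit mapping; the range is an assumed example.
case class IdRange(start: Int, end: Int) {
  def contains(x: Int): Boolean = start <= x && x < end
}

object ClientBitModel extends App {
  val probeClients = Seq(IdRange(0x40, 0x41)) // assumed: one probing client owning source id 0x40
  def clientBit(source: Int): Seq[Boolean] = probeClients.map(_.contains(source))
  println(clientBit(0x40)) // List(true)  -> req_clientBit = 1
  println(clientBit(0x07)) // List(false) -> req_clientBit = 0
}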
module MSHR_20( // @[MSHR.scala:84:7] input clock, // @[MSHR.scala:84:7] input reset, // @[MSHR.scala:84:7] input io_allocate_valid, // @[MSHR.scala:86:14] input io_allocate_bits_prio_1, // @[MSHR.scala:86:14] input io_allocate_bits_prio_2, // @[MSHR.scala:86:14] input io_allocate_bits_control, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14] input [7:0] io_allocate_bits_source, // @[MSHR.scala:86:14] input [12:0] io_allocate_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14] input [9:0] io_allocate_bits_set, // @[MSHR.scala:86:14] input io_allocate_bits_repeat, // @[MSHR.scala:86:14] input io_directory_valid, // @[MSHR.scala:86:14] input io_directory_bits_dirty, // @[MSHR.scala:86:14] input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14] input io_directory_bits_clients, // @[MSHR.scala:86:14] input [12:0] io_directory_bits_tag, // @[MSHR.scala:86:14] input io_directory_bits_hit, // @[MSHR.scala:86:14] input [2:0] io_directory_bits_way, // @[MSHR.scala:86:14] output io_status_valid, // @[MSHR.scala:86:14] output [9:0] io_status_bits_set, // @[MSHR.scala:86:14] output [12:0] io_status_bits_tag, // @[MSHR.scala:86:14] output [2:0] io_status_bits_way, // @[MSHR.scala:86:14] output io_status_bits_blockB, // @[MSHR.scala:86:14] output io_status_bits_nestB, // @[MSHR.scala:86:14] output io_status_bits_blockC, // @[MSHR.scala:86:14] output io_status_bits_nestC, // @[MSHR.scala:86:14] input io_schedule_ready, // @[MSHR.scala:86:14] output io_schedule_valid, // @[MSHR.scala:86:14] output io_schedule_bits_a_valid, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14] output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14] output io_schedule_bits_b_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14] output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14] output io_schedule_bits_c_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14] output io_schedule_bits_d_valid, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14] output [7:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14] output 
[9:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14] output io_schedule_bits_e_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14] output io_schedule_bits_x_valid, // @[MSHR.scala:86:14] output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14] output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14] output io_schedule_bits_reload, // @[MSHR.scala:86:14] input io_sinkc_valid, // @[MSHR.scala:86:14] input io_sinkc_bits_last, // @[MSHR.scala:86:14] input [9:0] io_sinkc_bits_set, // @[MSHR.scala:86:14] input [12:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14] input [7:0] io_sinkc_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14] input io_sinkc_bits_data, // @[MSHR.scala:86:14] input io_sinkd_valid, // @[MSHR.scala:86:14] input io_sinkd_bits_last, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14] input [4:0] io_sinkd_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14] input io_sinkd_bits_denied, // @[MSHR.scala:86:14] input io_sinke_valid, // @[MSHR.scala:86:14] input [4:0] io_sinke_bits_sink, // @[MSHR.scala:86:14] input [9:0] io_nestedwb_set, // @[MSHR.scala:86:14] input [12:0] io_nestedwb_tag, // @[MSHR.scala:86:14] input io_nestedwb_b_toN, // @[MSHR.scala:86:14] input io_nestedwb_b_toB, // @[MSHR.scala:86:14] input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14] input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14] ); wire [12:0] final_meta_writeback_tag; // @[MSHR.scala:215:38] wire final_meta_writeback_clients; // @[MSHR.scala:215:38] wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38] wire final_meta_writeback_dirty; // @[MSHR.scala:215:38] wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7] wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7] wire [7:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7] wire [12:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7] wire [9:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7] wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7] wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7] wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7] wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // 
@[MSHR.scala:84:7] wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7] wire [12:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7] wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7] wire [2:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7] wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7] wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7] wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7] wire [9:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7] wire [12:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7] wire [7:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7] wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7] wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7] wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7] wire [4:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7] wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7] wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7] wire [4:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7] wire [9:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7] wire [12:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7] wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7] wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7] wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7] wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7] wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68] wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80] wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21] wire invalid_clients = 1'h0; // @[MSHR.scala:268:21] wire _excluded_client_T = 1'h0; // @[MSHR.scala:279:38] wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137] wire _excluded_client_T_9 = 1'h0; // @[MSHR.scala:279:57] wire excluded_client = 1'h0; // @[MSHR.scala:279:28] wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11] wire allocate_as_full_prio_0 = 1'h0; // @[MSHR.scala:504:34] wire new_request_prio_0 = 1'h0; // @[MSHR.scala:506:24] wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137] wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11] wire _io_schedule_bits_b_bits_clients_T = 1'h1; // @[MSHR.scala:289:53] wire _last_probe_T_1 = 1'h1; // @[MSHR.scala:459:66] wire [4:0] io_schedule_bits_a_bits_source = 5'h0; // @[MSHR.scala:84:7] wire [4:0] io_schedule_bits_c_bits_source = 5'h0; // @[MSHR.scala:84:7] wire [4:0] io_schedule_bits_d_bits_sink = 5'h0; // @[MSHR.scala:84:7] wire [12:0] invalid_tag = 13'h0; // @[MSHR.scala:268:21] wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21] wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70] wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // 
@[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34] wire [7:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34] wire [12:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34] wire [9:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34] wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40] wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93] wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28] wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39] wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105] wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55] wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91] wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41] wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41] wire [12:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41] wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51] wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64] wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41] wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41] wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57] wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41] wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43] wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40] wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66] wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41] wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41] wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41] wire [12:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41] wire no_wait; // @[MSHR.scala:183:83] wire [9:0] io_status_bits_set_0; // @[MSHR.scala:84:7] wire [12:0] io_status_bits_tag_0; // @[MSHR.scala:84:7] wire [2:0] io_status_bits_way_0; // @[MSHR.scala:84:7] wire io_status_bits_blockB_0; // @[MSHR.scala:84:7] wire io_status_bits_nestB_0; // @[MSHR.scala:84:7] wire io_status_bits_blockC_0; // @[MSHR.scala:84:7] wire io_status_bits_nestC_0; // @[MSHR.scala:84:7] wire io_status_valid_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_opcode_0; 
// @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7] wire [7:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7] wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7] wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7] wire io_schedule_valid_0; // @[MSHR.scala:84:7] reg request_valid; // @[MSHR.scala:97:30] assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30] reg request_prio_1; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20] reg request_prio_2; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20] reg request_control; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_opcode; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_param; // @[MSHR.scala:98:20] reg [2:0] request_size; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20] reg [7:0] request_source; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20] reg [12:0] request_tag; // @[MSHR.scala:98:20] assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_offset; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_put; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20] reg [9:0] 
request_set; // @[MSHR.scala:98:20] assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] reg meta_valid; // @[MSHR.scala:99:27] reg meta_dirty; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17] reg [1:0] meta_state; // @[MSHR.scala:100:17] reg meta_clients; // @[MSHR.scala:100:17] wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39] assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients; // @[MSHR.scala:100:17, :289:51] wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27] wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27] wire _last_probe_T_2 = meta_clients; // @[MSHR.scala:100:17, :459:64] reg [12:0] meta_tag; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17] reg meta_hit; // @[MSHR.scala:100:17] reg [2:0] meta_way; // @[MSHR.scala:100:17] assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] wire [2:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38] reg s_rprobe; // @[MSHR.scala:121:33] reg w_rprobeackfirst; // @[MSHR.scala:122:33] reg w_rprobeacklast; // @[MSHR.scala:123:33] reg s_release; // @[MSHR.scala:124:33] reg w_releaseack; // @[MSHR.scala:125:33] reg s_pprobe; // @[MSHR.scala:126:33] reg s_acquire; // @[MSHR.scala:127:33] reg s_flush; // @[MSHR.scala:128:33] reg w_grantfirst; // @[MSHR.scala:129:33] reg w_grantlast; // @[MSHR.scala:130:33] reg w_grant; // @[MSHR.scala:131:33] reg w_pprobeackfirst; // @[MSHR.scala:132:33] reg w_pprobeacklast; // @[MSHR.scala:133:33] reg w_pprobeack; // @[MSHR.scala:134:33] reg s_grantack; // @[MSHR.scala:136:33] reg s_execute; // @[MSHR.scala:137:33] reg w_grantack; // @[MSHR.scala:138:33] reg s_writeback; // @[MSHR.scala:139:33] reg [2:0] sink; // @[MSHR.scala:147:17] assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17] reg gotT; // @[MSHR.scala:148:17] reg bad_grant; // @[MSHR.scala:149:22] assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22] reg probes_done; // @[MSHR.scala:150:24] reg probes_toN; // @[MSHR.scala:151:23] reg probes_noT; // @[MSHR.scala:152:23] wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28] wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45] wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62] wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}] wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82] wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}] wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103] wire _io_status_bits_blockB_T_7 = 
_io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}] assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}] assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40] wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39] wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}] wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}] wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96] assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}] assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93] assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28] assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28] wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43] wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64] wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}] wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85] wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}] assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}] assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39] wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33] wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}] wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}] assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}] assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83] wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31] wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}] assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}] assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55] wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31] wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44] assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}] assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41] wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32] wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}] assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}] assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64] wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31] wire 
_io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}] assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}] assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57] wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31] assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}] assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43] wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31] assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}] assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40] wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34] wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}] wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70] wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}] assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}] assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66] wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49] wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}] wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}] wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49] wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}] assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}] assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105] wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71] wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71] wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71] wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire [12:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71] wire final_meta_writeback_hit; // @[MSHR.scala:215:38] wire req_clientBit = request_source == 8'h40; // @[Parameters.scala:46:9] wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12] wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12] wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _req_needT_T_2; // @[Parameters.scala:270:13] assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13] wire 
_excluded_client_T_6; // @[Parameters.scala:279:117] assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117] wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42] wire _req_needT_T_3; // @[Parameters.scala:270:42] assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42] wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11] assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11] wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42] wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _req_needT_T_6; // @[Parameters.scala:271:14] assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14] wire _req_acquire_T; // @[MSHR.scala:219:36] assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14] wire _excluded_client_T_1; // @[Parameters.scala:279:12] assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12] wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52] wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89] wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52] wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}] wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}] wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81] wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}] wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}] wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}] wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65] wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}] wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55] wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78] wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78] assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78] wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70] wire _evict_T_2; // @[MSHR.scala:317:26] assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _before_T_1; // @[MSHR.scala:317:26] assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}] wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 
2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}] wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43] assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43] wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}] wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75] wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}] wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}] wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}] wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54] wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}] wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45] wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}] wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}] wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40] wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40] assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40] wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65] assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65] wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41] wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}] wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72] wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}] wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70] wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70] wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53] assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53] wire _evict_T_1; // @[MSHR.scala:317:26] assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire _before_T; // @[MSHR.scala:317:26] assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70] wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70] wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55] wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? 
_final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70] wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70] wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66] wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}] wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}] wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40] assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30] wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54] wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}] assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21] assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21] assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36] assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36] wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9] wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}] wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}] wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50] wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}] wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}] wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}] wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56] wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? 
_io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70] assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}] wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51] wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55] wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52] wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}] wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}] assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38] assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91] wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42] wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70] wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}] assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}] assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41] wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42] assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}] assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41] assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51] assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41] assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41] assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}] assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41] wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42] wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53] wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53] wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89] wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53] wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53] wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? 
request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79] assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41] wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42] assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 13'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}] assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41] wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32] wire [3:0] evict; // @[MSHR.scala:314:26] wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32] wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32] wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32] assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32] assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39] wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39] assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39] assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76] wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76] assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76] assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32] assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] before_0; // @[MSHR.scala:314:26] wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32] wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _before_out_T_4 = before_c ? 
_before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11] assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] after; // @[MSHR.scala:314:26] wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26] wire _after_T; // @[MSHR.scala:317:26] assign _after_T = _GEN_9; // @[MSHR.scala:317:26] wire _prior_T; // @[MSHR.scala:317:26] assign _prior_T = _GEN_9; // @[MSHR.scala:317:26] wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32] wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26] wire _after_T_1; // @[MSHR.scala:317:26] assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire _prior_T_1; // @[MSHR.scala:317:26] assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32] wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32] assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32] assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39] wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39] assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39] assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76] wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76] assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76] assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26] wire _after_T_3; // @[MSHR.scala:317:26] assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26] wire _prior_T_3; // @[MSHR.scala:317:26] assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26] assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? 
{1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire probe_bit = io_sinkc_bits_source_0 == 8'h40; // @[Parameters.scala:46:9] wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9] wire _last_probe_T; // @[MSHR.scala:459:33] assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33] wire _probes_done_T; // @[MSHR.scala:467:32] assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32] wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}] wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11] wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43] wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}] wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75] wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}] wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9] wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}] wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53] wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}] wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42] wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55] wire _w_rprobeacklast_T; // @[MSHR.scala:471:55] assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55] wire _w_pprobeacklast_T; // @[MSHR.scala:473:55] assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55] wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}] wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42] wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}] wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77] wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}] wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}] wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32] wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33] wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}] wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35] wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40] wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [12:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [2:0] new_meta_way = _new_meta_T ? 
final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [7:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [12:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [9:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12] wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _new_needT_T_2; // @[Parameters.scala:270:13] assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13] wire _new_skipProbe_T_5; // @[Parameters.scala:279:117] assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117] wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42] wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _new_needT_T_6; // @[Parameters.scala:271:14] assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14] wire _new_skipProbe_T; // @[Parameters.scala:279:12] assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12] wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52] wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89] wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire new_clientBit = new_request_source == 8'h40; // @[Parameters.scala:46:9] wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50] wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}] wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // 
@[Parameters.scala:279:{40,77,87}] wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}] wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9] wire [3:0] prior; // @[MSHR.scala:314:26] wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32] wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
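Note on the req_needT / new_needT cones above: they are the usual TileLink check for whether a request needs write (T/trunk) permission. A minimal Chisel sketch of the same decode is below; the opcode/param encodings (Hint = 5, AcquireBlock = 6, AcquirePerm = 7, PREFETCH_WRITE = 1, NtoB = 0) are assumed standard TileLink values, since Parameters.scala itself is not part of this excerpt.

import chisel3._

// Hypothetical standalone decode matching the req_needT logic in the Verilog above.
class NeedT extends Module {
  val io = IO(new Bundle {
    val opcode = Input(UInt(3.W))
    val param  = Input(UInt(3.W))
    val needT  = Output(Bool())
  })
  // needT when: a Put-class request (opcode(2) == 0), a prefetch-write Hint,
  // or an Acquire that asks for more than a branch (param != NtoB).
  io.needT := !io.opcode(2) ||
    (io.opcode === 5.U && io.param === 1.U) ||
    ((io.opcode === 6.U || io.opcode === 7.U) && io.param =/= 0.U)
}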
Generate the Verilog code corresponding to the following Chisel files.
File EgressUnit.scala:
package constellation.router

import chisel3._
import chisel3.util._

import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._

import constellation.channel._
import constellation.routing.{FlowRoutingBundle}

class EgressUnit(coupleSAVA: Boolean, combineSAST: Boolean,
  inParams: Seq[ChannelParams], ingressParams: Seq[IngressChannelParams],
  cParam: EgressChannelParams)
  (implicit p: Parameters) extends AbstractOutputUnit(inParams, ingressParams, cParam)(p) {

  class EgressUnitIO extends AbstractOutputUnitIO(inParams, ingressParams, cParam) {
    val out = Decoupled(new EgressFlit(cParam.payloadBits))
  }

  val io = IO(new EgressUnitIO)

  val channel_empty = RegInit(true.B)
  val flow = Reg(new FlowRoutingBundle)
  val q = Module(new Queue(new EgressFlit(cParam.payloadBits), 3 - (if (combineSAST) 1 else 0), flow=true))

  q.io.enq.valid := io.in(0).valid
  q.io.enq.bits.head := io.in(0).bits.head
  q.io.enq.bits.tail := io.in(0).bits.tail
  val flows = cParam.possibleFlows.toSeq
  if (flows.size == 0) {
    q.io.enq.bits.ingress_id := 0.U(1.W)
  } else {
    q.io.enq.bits.ingress_id := Mux1H(
      flows.map(f => (f.ingressNode.U === io.in(0).bits.flow.ingress_node &&
        f.ingressNodeId.U === io.in(0).bits.flow.ingress_node_id)),
      flows.map(f => f.ingressId.U(ingressIdBits.W))
    )
  }
  q.io.enq.bits.payload := io.in(0).bits.payload
  io.out <> q.io.deq

  assert(!(q.io.enq.valid && !q.io.enq.ready))

  io.credit_available(0) := q.io.count === 0.U
  io.channel_status(0).occupied := !channel_empty
  io.channel_status(0).flow := flow

  when (io.credit_alloc(0).alloc && io.credit_alloc(0).tail) {
    channel_empty := true.B
    if (coupleSAVA) io.channel_status(0).occupied := false.B
  }

  when (io.allocs(0).alloc) {
    channel_empty := false.B
    flow := io.allocs(0).flow
  }
}
module EgressUnit_9( // @[EgressUnit.scala:12:7]
  input clock, // @[EgressUnit.scala:12:7]
  input reset, // @[EgressUnit.scala:12:7]
  input io_in_0_valid, // @[EgressUnit.scala:18:14]
  input io_in_0_bits_head, // @[EgressUnit.scala:18:14]
  input io_in_0_bits_tail, // @[EgressUnit.scala:18:14]
  input [72:0] io_in_0_bits_payload, // @[EgressUnit.scala:18:14]
  input [3:0] io_in_0_bits_flow_ingress_node, // @[EgressUnit.scala:18:14]
  input [2:0] io_in_0_bits_flow_ingress_node_id, // @[EgressUnit.scala:18:14]
  output io_credit_available_0, // @[EgressUnit.scala:18:14]
  output io_channel_status_0_occupied, // @[EgressUnit.scala:18:14]
  input io_allocs_0_alloc, // @[EgressUnit.scala:18:14]
  input io_credit_alloc_0_alloc, // @[EgressUnit.scala:18:14]
  input io_credit_alloc_0_tail, // @[EgressUnit.scala:18:14]
  input io_out_ready, // @[EgressUnit.scala:18:14]
  output io_out_valid, // @[EgressUnit.scala:18:14]
  output io_out_bits_head, // @[EgressUnit.scala:18:14]
  output io_out_bits_tail, // @[EgressUnit.scala:18:14]
  output [72:0] io_out_bits_payload // @[EgressUnit.scala:18:14]
);
  wire _q_io_enq_ready; // @[EgressUnit.scala:22:17]
  wire [1:0] _q_io_count; // @[EgressUnit.scala:22:17]
  reg channel_empty; // @[EgressUnit.scala:20:30]
  wire _q_io_enq_bits_ingress_id_T_13 = io_in_0_bits_flow_ingress_node_id == 3'h1; // @[EgressUnit.scala:32:27]
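The lone compare at the end of the (truncated) module above comes from the Mux1H in EgressUnit.scala that translates the incoming flow's (ingress_node, ingress_node_id) pair into an ingress_id. A minimal sketch of that selection pattern, with made-up flow constants standing in for cParam.possibleFlows, might look like:

import chisel3._
import chisel3.util._

// Toy illustration of the one-hot select used for ingress_id: exactly one of
// the select conditions is expected to be true, and its constant is forwarded.
class IngressIdSelect extends Module {
  val io = IO(new Bundle {
    val node    = Input(UInt(4.W))
    val node_id = Input(UInt(3.W))
    val out     = Output(UInt(3.W))
  })
  // (ingressNode, ingressNodeId, ingressId) triples; values are illustrative only.
  val flows = Seq((1, 0, 0), (1, 1, 1), (2, 0, 2))
  io.out := Mux1H(
    flows.map { case (n, nid, _) => io.node === n.U && io.node_id === nid.U },
    flows.map { case (_, _, ing) => ing.U(3.W) }
  )
}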
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
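The SInt >> defined earlier in Arithmetic.scala (used by PE.scala via shift_offset) rounds according to the RISC-V fixed-point rounding rule it links to. As a sanity check only, here is a plain-Scala model of the same bit manipulation, assuming small non-negative inputs:

// Hypothetical software model of the rounding right-shift; r is the rounding increment.
def roundShift(x: Int, u: Int): Int = {
  if (u == 0) x
  else {
    val pointFive = (x >> (u - 1)) & 1                                   // self(u-1)
    val zeros     = if (u <= 1) 0
                    else if ((x & ((1 << (u - 1)) - 1)) != 0) 1 else 0   // any bits below u-1
    val onesDigit = (x >> u) & 1                                         // self(u)
    val r         = pointFive & (zeros | onesDigit)
    (x >> u) + r
  }
}
// Examples: roundShift(5, 1) == 2, roundShift(6, 1) == 3, roundShift(7, 1) == 4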
module MacUnit_115( // @[PE.scala:14:7] input clock, // @[PE.scala:14:7] input reset, // @[PE.scala:14:7] input [7:0] io_in_a, // @[PE.scala:16:14] input [7:0] io_in_b, // @[PE.scala:16:14] input [31:0] io_in_c, // @[PE.scala:16:14] output [19:0] io_out_d // @[PE.scala:16:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7] wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7] wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7] wire [19:0] io_out_d_0; // @[PE.scala:14:7] wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7] wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7] wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54] wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54] assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12] assign io_out_d = io_out_d_0; // @[PE.scala:14:7] endmodule
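The generated MacUnit_115 above is the single-MAC helper that PE.scala instantiates so that synthesis reuses one multiplier: `io.out_d := io.in_c.mac(io.in_a, io.in_b)` becomes a sign-extended 8x8 multiply, an add against the 32-bit accumulator input, and a truncation of the sum to the 20-bit output. A minimal software-only sketch of that arithmetic follows; `MacUnitModel` and its constants are illustrative assumptions, not part of the sources or the generated Verilog.

// Illustrative model of MacUnit_115's datapath (assumed helper, not generated code).
object MacUnitModel {
  // a and b stand for the 8-bit signed inputs, c for the 32-bit signed accumulator input.
  def mac(a: Int, b: Int, c: Int): Int = {
    val product = a * b                              // {{8{a[7]}}, a} * {{8{b[7]}}, b}
    val sum     = (product.toLong + c.toLong).toInt  // low 32 bits of the 33-bit sum
    sum & 0xFFFFF                                    // io_out_d = sum[19:0]
  }
}
// Example: mac(-3, 5, 100) == 85 (20'h00055), i.e. -3 * 5 + 100.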
Generate the Verilog code corresponding to the following Chisel files. File RecFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import consts._ class RecFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val in = Input(Bits((inExpWidth + inSigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in); if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- io.out := io.in<<(outSigWidth - inSigWidth) io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W) } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( inExpWidth, inSigWidth, outExpWidth, outSigWidth, flRoundOpt_sigMSBitAlwaysZero )) roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn) roundAnyRawFNToRecFN.io.infiniteExc := false.B roundAnyRawFNToRecFN.io.in := rawIn roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags } } File rawFloatFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ /*---------------------------------------------------------------------------- | In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be | set. *----------------------------------------------------------------------------*/ object rawFloatFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat = { val exp = in(expWidth + sigWidth - 1, sigWidth - 1) val isZero = exp(expWidth, expWidth - 2) === 0.U val isSpecial = exp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && exp(expWidth - 2) out.isInf := isSpecial && ! exp(expWidth - 2) out.isZero := isZero out.sign := in(expWidth + sigWidth) out.sExp := exp.zext out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0) out } } File common.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ object consts { /*------------------------------------------------------------------------ | For rounding to integer values, rounding mode 'odd' rounds to minimum | magnitude instead, same as 'minMag'. 
*------------------------------------------------------------------------*/ def round_near_even = "b000".U(3.W) def round_minMag = "b001".U(3.W) def round_min = "b010".U(3.W) def round_max = "b011".U(3.W) def round_near_maxMag = "b100".U(3.W) def round_odd = "b110".U(3.W) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ def tininess_beforeRounding = 0.U def tininess_afterRounding = 1.U /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ def flRoundOpt_sigMSBitAlwaysZero = 1 def flRoundOpt_subnormsAlwaysExact = 2 def flRoundOpt_neverUnderflows = 4 def flRoundOpt_neverOverflows = 8 /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ def divSqrtOpt_twoBitsPerCycle = 16 } class RawFloat(val expWidth: Int, val sigWidth: Int) extends Bundle { val isNaN: Bool = Bool() // overrides all other fields val isInf: Bool = Bool() // overrides 'isZero', 'sExp', and 'sig' val isZero: Bool = Bool() // overrides 'sExp' and 'sig' val sign: Bool = Bool() val sExp: SInt = SInt((expWidth + 2).W) val sig: UInt = UInt((sigWidth + 1).W) // 2 m.s. bits cannot both be 0 } //*** CHANGE THIS INTO A '.isSigNaN' METHOD OF THE 'RawFloat' CLASS: object isSigNaNRawFloat { def apply(in: RawFloat): Bool = in.isNaN && !in.sig(in.sigWidth - 2) }
module RecFNToRecFN_3( // @[RecFNToRecFN.scala:44:5] input [64:0] io_in, // @[RecFNToRecFN.scala:48:16] input [2:0] io_roundingMode, // @[RecFNToRecFN.scala:48:16] output [32:0] io_out, // @[RecFNToRecFN.scala:48:16] output [4:0] io_exceptionFlags // @[RecFNToRecFN.scala:48:16] ); wire [64:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5] wire [2:0] io_roundingMode_0 = io_roundingMode; // @[RecFNToRecFN.scala:44:5] wire io_detectTininess = 1'h0; // @[RecFNToRecFN.scala:44:5] wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5] wire [4:0] io_exceptionFlags_0; // @[RecFNToRecFN.scala:44:5] wire [11:0] rawIn_exp = io_in_0[63:52]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawIn_isZero_T = rawIn_exp[11:9]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _rawIn_isSpecial_T = rawIn_exp[11:10]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [12:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [53:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23] wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23] wire [12:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [53:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23] wire _rawIn_out_isNaN_T = rawIn_exp[9]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _rawIn_out_isInf_T = rawIn_exp[9]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _rawIn_out_sign_T = io_in_0[64]; // @[rawFloatFromRecFN.scala:59:25] assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [51:0] _rawIn_out_sig_T_2 = io_in_0[51:0]; // @[rawFloatFromRecFN.scala:61:49] assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire _roundAnyRawFNToRecFN_io_invalidExc_T = rawIn_sig[51]; // @[rawFloatFromRecFN.scala:55:23] wire _roundAnyRawFNToRecFN_io_invalidExc_T_1 = ~_roundAnyRawFNToRecFN_io_invalidExc_T; // @[common.scala:82:{49,56}] wire _roundAnyRawFNToRecFN_io_invalidExc_T_2 = rawIn_isNaN & _roundAnyRawFNToRecFN_io_invalidExc_T_1; // @[rawFloatFromRecFN.scala:55:23] 
RoundAnyRawFNToRecFN_ie11_is53_oe8_os24_1 roundAnyRawFNToRecFN ( // @[RecFNToRecFN.scala:72:19] .io_invalidExc (_roundAnyRawFNToRecFN_io_invalidExc_T_2), // @[common.scala:82:46] .io_in_isNaN (rawIn_isNaN), // @[rawFloatFromRecFN.scala:55:23] .io_in_isInf (rawIn_isInf), // @[rawFloatFromRecFN.scala:55:23] .io_in_isZero (rawIn_isZero_0), // @[rawFloatFromRecFN.scala:55:23] .io_in_sign (rawIn_sign), // @[rawFloatFromRecFN.scala:55:23] .io_in_sExp (rawIn_sExp), // @[rawFloatFromRecFN.scala:55:23] .io_in_sig (rawIn_sig), // @[rawFloatFromRecFN.scala:55:23] .io_roundingMode (io_roundingMode_0), // @[RecFNToRecFN.scala:44:5] .io_out (io_out_0), .io_exceptionFlags (io_exceptionFlags_0) ); // @[RecFNToRecFN.scala:72:19] assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5] assign io_exceptionFlags = io_exceptionFlags_0; // @[RecFNToRecFN.scala:44:5] endmodule
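Because this instance narrows a 65-bit recoded double (expWidth 11, sigWidth 53) to a 33-bit recoded single (expWidth 8, sigWidth 24), RecFNToRecFN takes its `else` branch: the input is unpacked with rawFloatFromRecFN and re-rounded through RoundAnyRawFNToRecFN, and the only logic inlined above is the exponent-field classification (zero when the top three exponent bits are 0, NaN or infinity when the top two are both 1, distinguished by the next bit). A small host-side sketch of that test follows; `RecFNClassify` is an illustrative assumption, not part of the hardfloat sources.

// Illustrative, software-only version of rawFloatFromRecFN's exponent test (assumed helper).
object RecFNClassify {
  def classify(expWidth: Int, sigWidth: Int, in: BigInt): String = {
    val exp  = (in >> (sigWidth - 1)) & ((BigInt(1) << (expWidth + 1)) - 1) // in(expWidth+sigWidth-1, sigWidth-1)
    val top3 = (exp >> (expWidth - 2)).toInt                                // exp(expWidth, expWidth-2)
    if (top3 == 0) "zero"                            // isZero
    else if (top3 >= 6 && (top3 & 1) == 1) "NaN"     // isSpecial && exp(expWidth-2)
    else if (top3 >= 6) "infinity"                   // isSpecial && !exp(expWidth-2)
    else "finite nonzero"
  }
}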
Generate the Verilog code corresponding to the following Chisel files. File RegisterFile.scala: package saturn.backend import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import freechips.rocketchip.tile.{CoreModule} import freechips.rocketchip.util._ import saturn.common._ class OldestRRArbiter(val n: Int)(implicit p: Parameters) extends Module { val io = IO(new ArbiterIO(new VectorReadReq, n)) val arb = Module(new RRArbiter(new VectorReadReq, n)) io <> arb.io val oldest_oh = io.in.map(i => i.valid && i.bits.oldest) //assert(PopCount(oldest_oh) <= 1.U) when (oldest_oh.orR) { io.chosen := VecInit(oldest_oh).asUInt io.out.valid := true.B io.out.bits := Mux1H(oldest_oh, io.in.map(_.bits)) for (i <- 0 until n) { io.in(i).ready := oldest_oh(i) && io.out.ready } } } class RegisterReadXbar(n: Int, banks: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { val io = IO(new Bundle { val in = Vec(n, Flipped(new VectorReadIO)) val out = Vec(banks, new VectorReadIO) }) val arbs = Seq.fill(banks) { Module(new OldestRRArbiter(n)) } for (i <- 0 until banks) { io.out(i).req <> arbs(i).io.out } val bankOffset = log2Ceil(banks) for (i <- 0 until n) { val bank_sel = if (bankOffset == 0) true.B else UIntToOH(io.in(i).req.bits.eg(bankOffset-1,0)) for (j <- 0 until banks) { arbs(j).io.in(i).valid := io.in(i).req.valid && bank_sel(j) arbs(j).io.in(i).bits.eg := io.in(i).req.bits.eg >> bankOffset arbs(j).io.in(i).bits.oldest := io.in(i).req.bits.oldest } io.in(i).req.ready := Mux1H(bank_sel, arbs.map(_.io.in(i).ready)) io.in(i).resp := Mux1H(bank_sel, io.out.map(_.resp)) } } class RegisterFileBank(reads: Int, maskReads: Int, rows: Int, maskRows: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { val io = IO(new Bundle { val read = Vec(reads, Flipped(new VectorReadIO)) val mask_read = Vec(maskReads, Flipped(new VectorReadIO)) val write = Input(Valid(new VectorWrite(dLen))) val ll_write = Flipped(Decoupled(new VectorWrite(dLen))) }) val ll_write_valid = RegInit(false.B) val ll_write_bits = Reg(new VectorWrite(dLen)) val vrf = Mem(rows, Vec(dLen, Bool())) val v0_mask = Mem(maskRows, Vec(dLen, Bool())) for (read <- io.read) { read.req.ready := !(ll_write_valid && read.req.bits.eg === ll_write_bits.eg) read.resp := DontCare when (read.req.valid) { read.resp := vrf.read(read.req.bits.eg).asUInt } } for (mask_read <- io.mask_read) { mask_read.req.ready := !(ll_write_valid && mask_read.req.bits.eg === ll_write_bits.eg) mask_read.resp := DontCare when (mask_read.req.valid) { mask_read.resp := v0_mask.read(mask_read.req.bits.eg).asUInt } } val write = WireInit(io.write) io.ll_write.ready := false.B if (vParams.vrfHiccupBuffer) { when (!io.write.valid) { // drain hiccup buffer write.valid := ll_write_valid || io.ll_write.valid write.bits := Mux(ll_write_valid, ll_write_bits, io.ll_write.bits) ll_write_valid := false.B when (io.ll_write.valid && ll_write_valid) { ll_write_valid := true.B ll_write_bits := io.ll_write.bits } io.ll_write.ready := true.B } .elsewhen (!ll_write_valid) { // fill hiccup buffer when (io.ll_write.valid) { ll_write_valid := true.B ll_write_bits := io.ll_write.bits } io.ll_write.ready := true.B } } else { when (!io.write.valid) { io.ll_write.ready := true.B write.valid := io.ll_write.valid write.bits := io.ll_write.bits } } when (write.valid) { vrf.write( write.bits.eg, VecInit(write.bits.data.asBools), write.bits.mask.asBools) when (write.bits.eg < maskRows.U) { v0_mask.write( write.bits.eg, VecInit(write.bits.data.asBools), 
write.bits.mask.asBools) } } } class RegisterFile(reads: Seq[Int], maskReads: Seq[Int], pipeWrites: Int, llWrites: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { val nBanks = vParams.vrfBanking // Support 1, 2, and 4 banks for the VRF require(nBanks == 1 || nBanks == 2 || nBanks == 4) val io = IO(new Bundle { val read = MixedVec(reads.map(rc => Vec(rc, Flipped(new VectorReadIO)))) val mask_read = MixedVec(maskReads.map(rc => Vec(rc, Flipped(new VectorReadIO)))) val pipe_writes = Vec(pipeWrites, Input(Valid(new VectorWrite(dLen)))) val ll_writes = Vec(llWrites, Flipped(Decoupled(new VectorWrite(dLen)))) }) val vrf = Seq.fill(nBanks) { Module(new RegisterFileBank(reads.size, maskReads.size, egsTotal/nBanks, if (egsPerVReg < nBanks) 1 else egsPerVReg / nBanks)) } reads.zipWithIndex.foreach { case (rc, i) => val xbar = Module(new RegisterReadXbar(rc, nBanks)) vrf.zipWithIndex.foreach { case (bank, j) => bank.io.read(i) <> xbar.io.out(j) } xbar.io.in <> io.read(i) } maskReads.zipWithIndex.foreach { case (rc, i) => val mask_xbar = Module(new RegisterReadXbar(rc, nBanks)) vrf.zipWithIndex.foreach { case (bank, j) => bank.io.mask_read(i) <> mask_xbar.io.out(j) } mask_xbar.io.in <> io.mask_read(i) } io.ll_writes.foreach(_.ready := false.B) vrf.zipWithIndex.foreach { case (rf, i) => val bank_match = io.pipe_writes.map { w => (w.bits.bankId === i.U) && w.valid } val bank_write_data = Mux1H(bank_match, io.pipe_writes.map(_.bits.data)) val bank_write_mask = Mux1H(bank_match, io.pipe_writes.map(_.bits.mask)) val bank_write_eg = Mux1H(bank_match, io.pipe_writes.map(_.bits.eg)) val bank_write_valid = bank_match.orR rf.io.write.valid := bank_write_valid rf.io.write.bits.data := bank_write_data rf.io.write.bits.mask := bank_write_mask rf.io.write.bits.eg := bank_write_eg >> vrfBankBits when (bank_write_valid) { PopCount(bank_match) === 1.U } val ll_arb = Module(new Arbiter(new VectorWrite(dLen), llWrites)) rf.io.ll_write <> ll_arb.io.out io.ll_writes.zipWithIndex.foreach { case (w, j) => ll_arb.io.in(j).valid := w.valid && w.bits.bankId === i.U ll_arb.io.in(j).bits.eg := w.bits.eg >> vrfBankBits ll_arb.io.in(j).bits.data := w.bits.data ll_arb.io.in(j).bits.mask := w.bits.mask when (ll_arb.io.in(j).ready && w.bits.bankId === i.U) { w.ready := true.B } } } }
module RegisterFile( // @[RegisterFile.scala:120:7] input clock, // @[RegisterFile.scala:120:7] input reset, // @[RegisterFile.scala:120:7] output io_read_2_0_req_ready, // @[RegisterFile.scala:126:14] input io_read_2_0_req_valid, // @[RegisterFile.scala:126:14] input [4:0] io_read_2_0_req_bits_eg, // @[RegisterFile.scala:126:14] input io_read_2_0_req_bits_oldest, // @[RegisterFile.scala:126:14] output [127:0] io_read_2_0_resp, // @[RegisterFile.scala:126:14] output io_read_2_1_req_ready, // @[RegisterFile.scala:126:14] input io_read_2_1_req_valid, // @[RegisterFile.scala:126:14] input [4:0] io_read_2_1_req_bits_eg, // @[RegisterFile.scala:126:14] input io_read_2_1_req_bits_oldest, // @[RegisterFile.scala:126:14] output [127:0] io_read_2_1_resp, // @[RegisterFile.scala:126:14] output io_read_1_0_req_ready, // @[RegisterFile.scala:126:14] input io_read_1_0_req_valid, // @[RegisterFile.scala:126:14] input [4:0] io_read_1_0_req_bits_eg, // @[RegisterFile.scala:126:14] input io_read_1_0_req_bits_oldest, // @[RegisterFile.scala:126:14] output [127:0] io_read_1_0_resp, // @[RegisterFile.scala:126:14] output io_read_0_0_req_ready, // @[RegisterFile.scala:126:14] input io_read_0_0_req_valid, // @[RegisterFile.scala:126:14] input [4:0] io_read_0_0_req_bits_eg, // @[RegisterFile.scala:126:14] input io_read_0_0_req_bits_oldest, // @[RegisterFile.scala:126:14] output [127:0] io_read_0_0_resp, // @[RegisterFile.scala:126:14] output io_read_0_1_req_ready, // @[RegisterFile.scala:126:14] input io_read_0_1_req_valid, // @[RegisterFile.scala:126:14] input [4:0] io_read_0_1_req_bits_eg, // @[RegisterFile.scala:126:14] input io_read_0_1_req_bits_oldest, // @[RegisterFile.scala:126:14] output [127:0] io_read_0_1_resp, // @[RegisterFile.scala:126:14] output io_read_0_2_req_ready, // @[RegisterFile.scala:126:14] input io_read_0_2_req_valid, // @[RegisterFile.scala:126:14] input [4:0] io_read_0_2_req_bits_eg, // @[RegisterFile.scala:126:14] output [127:0] io_read_0_2_resp, // @[RegisterFile.scala:126:14] output io_mask_read_0_0_req_ready, // @[RegisterFile.scala:126:14] input io_mask_read_0_0_req_valid, // @[RegisterFile.scala:126:14] input io_mask_read_0_0_req_bits_oldest, // @[RegisterFile.scala:126:14] output [127:0] io_mask_read_0_0_resp, // @[RegisterFile.scala:126:14] output io_mask_read_0_1_req_ready, // @[RegisterFile.scala:126:14] input io_mask_read_0_1_req_valid, // @[RegisterFile.scala:126:14] input io_mask_read_0_1_req_bits_oldest, // @[RegisterFile.scala:126:14] output [127:0] io_mask_read_0_1_resp, // @[RegisterFile.scala:126:14] output io_mask_read_0_2_req_ready, // @[RegisterFile.scala:126:14] input io_mask_read_0_2_req_valid, // @[RegisterFile.scala:126:14] input io_mask_read_0_2_req_bits_oldest, // @[RegisterFile.scala:126:14] output [127:0] io_mask_read_0_2_resp, // @[RegisterFile.scala:126:14] output io_mask_read_0_3_req_ready, // @[RegisterFile.scala:126:14] input io_mask_read_0_3_req_valid, // @[RegisterFile.scala:126:14] input io_mask_read_0_3_req_bits_oldest, // @[RegisterFile.scala:126:14] output [127:0] io_mask_read_0_3_resp, // @[RegisterFile.scala:126:14] output io_mask_read_0_4_req_ready, // @[RegisterFile.scala:126:14] input io_mask_read_0_4_req_valid, // @[RegisterFile.scala:126:14] input [4:0] io_mask_read_0_4_req_bits_eg, // @[RegisterFile.scala:126:14] output [127:0] io_mask_read_0_4_resp, // @[RegisterFile.scala:126:14] input io_pipe_writes_0_valid, // @[RegisterFile.scala:126:14] input [4:0] io_pipe_writes_0_bits_eg, // @[RegisterFile.scala:126:14] input [127:0] 
io_pipe_writes_0_bits_data, // @[RegisterFile.scala:126:14] input [127:0] io_pipe_writes_0_bits_mask, // @[RegisterFile.scala:126:14] output io_ll_writes_0_ready, // @[RegisterFile.scala:126:14] input io_ll_writes_0_valid, // @[RegisterFile.scala:126:14] input [4:0] io_ll_writes_0_bits_eg, // @[RegisterFile.scala:126:14] input [127:0] io_ll_writes_0_bits_data, // @[RegisterFile.scala:126:14] input [127:0] io_ll_writes_0_bits_mask, // @[RegisterFile.scala:126:14] input io_ll_writes_1_valid, // @[RegisterFile.scala:126:14] input [4:0] io_ll_writes_1_bits_eg, // @[RegisterFile.scala:126:14] output io_ll_writes_2_ready, // @[RegisterFile.scala:126:14] input io_ll_writes_2_valid, // @[RegisterFile.scala:126:14] input [4:0] io_ll_writes_2_bits_eg, // @[RegisterFile.scala:126:14] input [127:0] io_ll_writes_2_bits_data, // @[RegisterFile.scala:126:14] input [127:0] io_ll_writes_2_bits_mask // @[RegisterFile.scala:126:14] ); wire _ll_arb_3_io_in_0_ready; // @[RegisterFile.scala:167:24] wire _ll_arb_3_io_in_2_ready; // @[RegisterFile.scala:167:24] wire _ll_arb_3_io_out_valid; // @[RegisterFile.scala:167:24] wire [4:0] _ll_arb_3_io_out_bits_eg; // @[RegisterFile.scala:167:24] wire [127:0] _ll_arb_3_io_out_bits_data; // @[RegisterFile.scala:167:24] wire [127:0] _ll_arb_3_io_out_bits_mask; // @[RegisterFile.scala:167:24] wire _ll_arb_2_io_in_0_ready; // @[RegisterFile.scala:167:24] wire _ll_arb_2_io_in_2_ready; // @[RegisterFile.scala:167:24] wire _ll_arb_2_io_out_valid; // @[RegisterFile.scala:167:24] wire [4:0] _ll_arb_2_io_out_bits_eg; // @[RegisterFile.scala:167:24] wire [127:0] _ll_arb_2_io_out_bits_data; // @[RegisterFile.scala:167:24] wire [127:0] _ll_arb_2_io_out_bits_mask; // @[RegisterFile.scala:167:24] wire _ll_arb_1_io_in_0_ready; // @[RegisterFile.scala:167:24] wire _ll_arb_1_io_in_2_ready; // @[RegisterFile.scala:167:24] wire _ll_arb_1_io_out_valid; // @[RegisterFile.scala:167:24] wire [4:0] _ll_arb_1_io_out_bits_eg; // @[RegisterFile.scala:167:24] wire [127:0] _ll_arb_1_io_out_bits_data; // @[RegisterFile.scala:167:24] wire [127:0] _ll_arb_1_io_out_bits_mask; // @[RegisterFile.scala:167:24] wire _ll_arb_io_in_0_ready; // @[RegisterFile.scala:167:24] wire _ll_arb_io_in_2_ready; // @[RegisterFile.scala:167:24] wire _ll_arb_io_out_valid; // @[RegisterFile.scala:167:24] wire [4:0] _ll_arb_io_out_bits_eg; // @[RegisterFile.scala:167:24] wire [127:0] _ll_arb_io_out_bits_data; // @[RegisterFile.scala:167:24] wire [127:0] _ll_arb_io_out_bits_mask; // @[RegisterFile.scala:167:24] wire [4:0] _mask_xbar_io_out_0_req_bits_eg; // @[RegisterFile.scala:145:27] wire [4:0] _mask_xbar_io_out_1_req_bits_eg; // @[RegisterFile.scala:145:27] wire [4:0] _mask_xbar_io_out_2_req_bits_eg; // @[RegisterFile.scala:145:27] wire [4:0] _mask_xbar_io_out_3_req_bits_eg; // @[RegisterFile.scala:145:27] wire _xbar_2_io_out_0_req_valid; // @[RegisterFile.scala:137:22] wire [4:0] _xbar_2_io_out_0_req_bits_eg; // @[RegisterFile.scala:137:22] wire _xbar_2_io_out_1_req_valid; // @[RegisterFile.scala:137:22] wire [4:0] _xbar_2_io_out_1_req_bits_eg; // @[RegisterFile.scala:137:22] wire _xbar_2_io_out_2_req_valid; // @[RegisterFile.scala:137:22] wire [4:0] _xbar_2_io_out_2_req_bits_eg; // @[RegisterFile.scala:137:22] wire _xbar_2_io_out_3_req_valid; // @[RegisterFile.scala:137:22] wire [4:0] _xbar_2_io_out_3_req_bits_eg; // @[RegisterFile.scala:137:22] wire _xbar_1_io_out_0_req_valid; // @[RegisterFile.scala:137:22] wire [4:0] _xbar_1_io_out_0_req_bits_eg; // @[RegisterFile.scala:137:22] wire _xbar_1_io_out_1_req_valid; // 
@[RegisterFile.scala:137:22] wire [4:0] _xbar_1_io_out_1_req_bits_eg; // @[RegisterFile.scala:137:22] wire _xbar_1_io_out_2_req_valid; // @[RegisterFile.scala:137:22] wire [4:0] _xbar_1_io_out_2_req_bits_eg; // @[RegisterFile.scala:137:22] wire _xbar_1_io_out_3_req_valid; // @[RegisterFile.scala:137:22] wire [4:0] _xbar_1_io_out_3_req_bits_eg; // @[RegisterFile.scala:137:22] wire _xbar_io_out_0_req_valid; // @[RegisterFile.scala:137:22] wire [4:0] _xbar_io_out_0_req_bits_eg; // @[RegisterFile.scala:137:22] wire _xbar_io_out_1_req_valid; // @[RegisterFile.scala:137:22] wire [4:0] _xbar_io_out_1_req_bits_eg; // @[RegisterFile.scala:137:22] wire _xbar_io_out_2_req_valid; // @[RegisterFile.scala:137:22] wire [4:0] _xbar_io_out_2_req_bits_eg; // @[RegisterFile.scala:137:22] wire _xbar_io_out_3_req_valid; // @[RegisterFile.scala:137:22] wire [4:0] _xbar_io_out_3_req_bits_eg; // @[RegisterFile.scala:137:22] wire _vrf_3_io_read_0_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_3_io_read_0_resp; // @[RegisterFile.scala:134:38] wire _vrf_3_io_read_1_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_3_io_read_1_resp; // @[RegisterFile.scala:134:38] wire _vrf_3_io_read_2_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_3_io_read_2_resp; // @[RegisterFile.scala:134:38] wire _vrf_3_io_mask_read_0_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_3_io_mask_read_0_resp; // @[RegisterFile.scala:134:38] wire _vrf_3_io_ll_write_ready; // @[RegisterFile.scala:134:38] wire _vrf_2_io_read_0_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_2_io_read_0_resp; // @[RegisterFile.scala:134:38] wire _vrf_2_io_read_1_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_2_io_read_1_resp; // @[RegisterFile.scala:134:38] wire _vrf_2_io_read_2_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_2_io_read_2_resp; // @[RegisterFile.scala:134:38] wire _vrf_2_io_mask_read_0_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_2_io_mask_read_0_resp; // @[RegisterFile.scala:134:38] wire _vrf_2_io_ll_write_ready; // @[RegisterFile.scala:134:38] wire _vrf_1_io_read_0_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_1_io_read_0_resp; // @[RegisterFile.scala:134:38] wire _vrf_1_io_read_1_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_1_io_read_1_resp; // @[RegisterFile.scala:134:38] wire _vrf_1_io_read_2_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_1_io_read_2_resp; // @[RegisterFile.scala:134:38] wire _vrf_1_io_mask_read_0_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_1_io_mask_read_0_resp; // @[RegisterFile.scala:134:38] wire _vrf_1_io_ll_write_ready; // @[RegisterFile.scala:134:38] wire _vrf_0_io_read_0_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_0_io_read_0_resp; // @[RegisterFile.scala:134:38] wire _vrf_0_io_read_1_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_0_io_read_1_resp; // @[RegisterFile.scala:134:38] wire _vrf_0_io_read_2_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_0_io_read_2_resp; // @[RegisterFile.scala:134:38] wire _vrf_0_io_mask_read_0_req_ready; // @[RegisterFile.scala:134:38] wire [127:0] _vrf_0_io_mask_read_0_resp; // @[RegisterFile.scala:134:38] wire _vrf_0_io_ll_write_ready; // @[RegisterFile.scala:134:38] wire [4:0] vrf_3_io_write_bits_eg = {2'h0, io_pipe_writes_0_bits_eg[4:2]}; // @[RegisterFile.scala:155:63, :164:{25,42}] wire [4:0] ll_arb_io_in_0_bits_eg = {2'h0, io_ll_writes_0_bits_eg[4:2]}; // 
@[RegisterFile.scala:155:63, :172:{33,46}] wire [4:0] ll_arb_io_in_1_bits_eg = {2'h0, io_ll_writes_1_bits_eg[4:2]}; // @[RegisterFile.scala:155:63, :172:{33,46}] wire [4:0] ll_arb_io_in_2_bits_eg = {2'h0, io_ll_writes_2_bits_eg[4:2]}; // @[RegisterFile.scala:155:63, :172:{33,46}] RegisterFileBank vrf_0 ( // @[RegisterFile.scala:134:38] .clock (clock), .reset (reset), .io_read_0_req_ready (_vrf_0_io_read_0_req_ready), .io_read_0_req_valid (_xbar_io_out_0_req_valid), // @[RegisterFile.scala:137:22] .io_read_0_req_bits_eg (_xbar_io_out_0_req_bits_eg), // @[RegisterFile.scala:137:22] .io_read_0_resp (_vrf_0_io_read_0_resp), .io_read_1_req_ready (_vrf_0_io_read_1_req_ready), .io_read_1_req_valid (_xbar_1_io_out_0_req_valid), // @[RegisterFile.scala:137:22] .io_read_1_req_bits_eg (_xbar_1_io_out_0_req_bits_eg), // @[RegisterFile.scala:137:22] .io_read_1_resp (_vrf_0_io_read_1_resp), .io_read_2_req_ready (_vrf_0_io_read_2_req_ready), .io_read_2_req_valid (_xbar_2_io_out_0_req_valid), // @[RegisterFile.scala:137:22] .io_read_2_req_bits_eg (_xbar_2_io_out_0_req_bits_eg), // @[RegisterFile.scala:137:22] .io_read_2_resp (_vrf_0_io_read_2_resp), .io_mask_read_0_req_ready (_vrf_0_io_mask_read_0_req_ready), .io_mask_read_0_req_bits_eg (_mask_xbar_io_out_0_req_bits_eg), // @[RegisterFile.scala:145:27] .io_mask_read_0_resp (_vrf_0_io_mask_read_0_resp), .io_write_valid (io_pipe_writes_0_bits_eg[1:0] == 2'h0 & io_pipe_writes_0_valid), // @[Bundles.scala:112:49] .io_write_bits_eg (vrf_3_io_write_bits_eg), // @[RegisterFile.scala:164:25] .io_write_bits_data (io_pipe_writes_0_bits_data), .io_write_bits_mask (io_pipe_writes_0_bits_mask), .io_ll_write_ready (_vrf_0_io_ll_write_ready), .io_ll_write_valid (_ll_arb_io_out_valid), // @[RegisterFile.scala:167:24] .io_ll_write_bits_eg (_ll_arb_io_out_bits_eg), // @[RegisterFile.scala:167:24] .io_ll_write_bits_data (_ll_arb_io_out_bits_data), // @[RegisterFile.scala:167:24] .io_ll_write_bits_mask (_ll_arb_io_out_bits_mask) // @[RegisterFile.scala:167:24] ); // @[RegisterFile.scala:134:38] RegisterFileBank vrf_1 ( // @[RegisterFile.scala:134:38] .clock (clock), .reset (reset), .io_read_0_req_ready (_vrf_1_io_read_0_req_ready), .io_read_0_req_valid (_xbar_io_out_1_req_valid), // @[RegisterFile.scala:137:22] .io_read_0_req_bits_eg (_xbar_io_out_1_req_bits_eg), // @[RegisterFile.scala:137:22] .io_read_0_resp (_vrf_1_io_read_0_resp), .io_read_1_req_ready (_vrf_1_io_read_1_req_ready), .io_read_1_req_valid (_xbar_1_io_out_1_req_valid), // @[RegisterFile.scala:137:22] .io_read_1_req_bits_eg (_xbar_1_io_out_1_req_bits_eg), // @[RegisterFile.scala:137:22] .io_read_1_resp (_vrf_1_io_read_1_resp), .io_read_2_req_ready (_vrf_1_io_read_2_req_ready), .io_read_2_req_valid (_xbar_2_io_out_1_req_valid), // @[RegisterFile.scala:137:22] .io_read_2_req_bits_eg (_xbar_2_io_out_1_req_bits_eg), // @[RegisterFile.scala:137:22] .io_read_2_resp (_vrf_1_io_read_2_resp), .io_mask_read_0_req_ready (_vrf_1_io_mask_read_0_req_ready), .io_mask_read_0_req_bits_eg (_mask_xbar_io_out_1_req_bits_eg), // @[RegisterFile.scala:145:27] .io_mask_read_0_resp (_vrf_1_io_mask_read_0_resp), .io_write_valid (io_pipe_writes_0_bits_eg[1:0] == 2'h1 & io_pipe_writes_0_valid), // @[Bundles.scala:112:49] .io_write_bits_eg (vrf_3_io_write_bits_eg), // @[RegisterFile.scala:164:25] .io_write_bits_data (io_pipe_writes_0_bits_data), .io_write_bits_mask (io_pipe_writes_0_bits_mask), .io_ll_write_ready (_vrf_1_io_ll_write_ready), .io_ll_write_valid (_ll_arb_1_io_out_valid), // @[RegisterFile.scala:167:24] .io_ll_write_bits_eg 
(_ll_arb_1_io_out_bits_eg), // @[RegisterFile.scala:167:24] .io_ll_write_bits_data (_ll_arb_1_io_out_bits_data), // @[RegisterFile.scala:167:24] .io_ll_write_bits_mask (_ll_arb_1_io_out_bits_mask) // @[RegisterFile.scala:167:24] ); // @[RegisterFile.scala:134:38] RegisterFileBank vrf_2 ( // @[RegisterFile.scala:134:38] .clock (clock), .reset (reset), .io_read_0_req_ready (_vrf_2_io_read_0_req_ready), .io_read_0_req_valid (_xbar_io_out_2_req_valid), // @[RegisterFile.scala:137:22] .io_read_0_req_bits_eg (_xbar_io_out_2_req_bits_eg), // @[RegisterFile.scala:137:22] .io_read_0_resp (_vrf_2_io_read_0_resp), .io_read_1_req_ready (_vrf_2_io_read_1_req_ready), .io_read_1_req_valid (_xbar_1_io_out_2_req_valid), // @[RegisterFile.scala:137:22] .io_read_1_req_bits_eg (_xbar_1_io_out_2_req_bits_eg), // @[RegisterFile.scala:137:22] .io_read_1_resp (_vrf_2_io_read_1_resp), .io_read_2_req_ready (_vrf_2_io_read_2_req_ready), .io_read_2_req_valid (_xbar_2_io_out_2_req_valid), // @[RegisterFile.scala:137:22] .io_read_2_req_bits_eg (_xbar_2_io_out_2_req_bits_eg), // @[RegisterFile.scala:137:22] .io_read_2_resp (_vrf_2_io_read_2_resp), .io_mask_read_0_req_ready (_vrf_2_io_mask_read_0_req_ready), .io_mask_read_0_req_bits_eg (_mask_xbar_io_out_2_req_bits_eg), // @[RegisterFile.scala:145:27] .io_mask_read_0_resp (_vrf_2_io_mask_read_0_resp), .io_write_valid (io_pipe_writes_0_bits_eg[1:0] == 2'h2 & io_pipe_writes_0_valid), // @[Bundles.scala:112:49] .io_write_bits_eg (vrf_3_io_write_bits_eg), // @[RegisterFile.scala:164:25] .io_write_bits_data (io_pipe_writes_0_bits_data), .io_write_bits_mask (io_pipe_writes_0_bits_mask), .io_ll_write_ready (_vrf_2_io_ll_write_ready), .io_ll_write_valid (_ll_arb_2_io_out_valid), // @[RegisterFile.scala:167:24] .io_ll_write_bits_eg (_ll_arb_2_io_out_bits_eg), // @[RegisterFile.scala:167:24] .io_ll_write_bits_data (_ll_arb_2_io_out_bits_data), // @[RegisterFile.scala:167:24] .io_ll_write_bits_mask (_ll_arb_2_io_out_bits_mask) // @[RegisterFile.scala:167:24] ); // @[RegisterFile.scala:134:38] RegisterFileBank vrf_3 ( // @[RegisterFile.scala:134:38] .clock (clock), .reset (reset), .io_read_0_req_ready (_vrf_3_io_read_0_req_ready), .io_read_0_req_valid (_xbar_io_out_3_req_valid), // @[RegisterFile.scala:137:22] .io_read_0_req_bits_eg (_xbar_io_out_3_req_bits_eg), // @[RegisterFile.scala:137:22] .io_read_0_resp (_vrf_3_io_read_0_resp), .io_read_1_req_ready (_vrf_3_io_read_1_req_ready), .io_read_1_req_valid (_xbar_1_io_out_3_req_valid), // @[RegisterFile.scala:137:22] .io_read_1_req_bits_eg (_xbar_1_io_out_3_req_bits_eg), // @[RegisterFile.scala:137:22] .io_read_1_resp (_vrf_3_io_read_1_resp), .io_read_2_req_ready (_vrf_3_io_read_2_req_ready), .io_read_2_req_valid (_xbar_2_io_out_3_req_valid), // @[RegisterFile.scala:137:22] .io_read_2_req_bits_eg (_xbar_2_io_out_3_req_bits_eg), // @[RegisterFile.scala:137:22] .io_read_2_resp (_vrf_3_io_read_2_resp), .io_mask_read_0_req_ready (_vrf_3_io_mask_read_0_req_ready), .io_mask_read_0_req_bits_eg (_mask_xbar_io_out_3_req_bits_eg), // @[RegisterFile.scala:145:27] .io_mask_read_0_resp (_vrf_3_io_mask_read_0_resp), .io_write_valid ((&(io_pipe_writes_0_bits_eg[1:0])) & io_pipe_writes_0_valid), // @[Bundles.scala:112:49] .io_write_bits_eg (vrf_3_io_write_bits_eg), // @[RegisterFile.scala:164:25] .io_write_bits_data (io_pipe_writes_0_bits_data), .io_write_bits_mask (io_pipe_writes_0_bits_mask), .io_ll_write_ready (_vrf_3_io_ll_write_ready), .io_ll_write_valid (_ll_arb_3_io_out_valid), // @[RegisterFile.scala:167:24] .io_ll_write_bits_eg 
(_ll_arb_3_io_out_bits_eg), // @[RegisterFile.scala:167:24] .io_ll_write_bits_data (_ll_arb_3_io_out_bits_data), // @[RegisterFile.scala:167:24] .io_ll_write_bits_mask (_ll_arb_3_io_out_bits_mask) // @[RegisterFile.scala:167:24] ); // @[RegisterFile.scala:134:38] RegisterReadXbar xbar ( // @[RegisterFile.scala:137:22] .clock (clock), .io_in_0_req_ready (io_read_0_0_req_ready), .io_in_0_req_valid (io_read_0_0_req_valid), .io_in_0_req_bits_eg (io_read_0_0_req_bits_eg), .io_in_0_req_bits_oldest (io_read_0_0_req_bits_oldest), .io_in_0_resp (io_read_0_0_resp), .io_in_1_req_ready (io_read_0_1_req_ready), .io_in_1_req_valid (io_read_0_1_req_valid), .io_in_1_req_bits_eg (io_read_0_1_req_bits_eg), .io_in_1_req_bits_oldest (io_read_0_1_req_bits_oldest), .io_in_1_resp (io_read_0_1_resp), .io_in_2_req_ready (io_read_0_2_req_ready), .io_in_2_req_valid (io_read_0_2_req_valid), .io_in_2_req_bits_eg (io_read_0_2_req_bits_eg), .io_in_2_resp (io_read_0_2_resp), .io_out_0_req_ready (_vrf_0_io_read_0_req_ready), // @[RegisterFile.scala:134:38] .io_out_0_req_valid (_xbar_io_out_0_req_valid), .io_out_0_req_bits_eg (_xbar_io_out_0_req_bits_eg), .io_out_0_resp (_vrf_0_io_read_0_resp), // @[RegisterFile.scala:134:38] .io_out_1_req_ready (_vrf_1_io_read_0_req_ready), // @[RegisterFile.scala:134:38] .io_out_1_req_valid (_xbar_io_out_1_req_valid), .io_out_1_req_bits_eg (_xbar_io_out_1_req_bits_eg), .io_out_1_resp (_vrf_1_io_read_0_resp), // @[RegisterFile.scala:134:38] .io_out_2_req_ready (_vrf_2_io_read_0_req_ready), // @[RegisterFile.scala:134:38] .io_out_2_req_valid (_xbar_io_out_2_req_valid), .io_out_2_req_bits_eg (_xbar_io_out_2_req_bits_eg), .io_out_2_resp (_vrf_2_io_read_0_resp), // @[RegisterFile.scala:134:38] .io_out_3_req_ready (_vrf_3_io_read_0_req_ready), // @[RegisterFile.scala:134:38] .io_out_3_req_valid (_xbar_io_out_3_req_valid), .io_out_3_req_bits_eg (_xbar_io_out_3_req_bits_eg), .io_out_3_resp (_vrf_3_io_read_0_resp) // @[RegisterFile.scala:134:38] ); // @[RegisterFile.scala:137:22] RegisterReadXbar_1 xbar_1 ( // @[RegisterFile.scala:137:22] .io_in_0_req_ready (io_read_1_0_req_ready), .io_in_0_req_valid (io_read_1_0_req_valid), .io_in_0_req_bits_eg (io_read_1_0_req_bits_eg), .io_in_0_req_bits_oldest (io_read_1_0_req_bits_oldest), .io_in_0_resp (io_read_1_0_resp), .io_out_0_req_ready (_vrf_0_io_read_1_req_ready), // @[RegisterFile.scala:134:38] .io_out_0_req_valid (_xbar_1_io_out_0_req_valid), .io_out_0_req_bits_eg (_xbar_1_io_out_0_req_bits_eg), .io_out_0_resp (_vrf_0_io_read_1_resp), // @[RegisterFile.scala:134:38] .io_out_1_req_ready (_vrf_1_io_read_1_req_ready), // @[RegisterFile.scala:134:38] .io_out_1_req_valid (_xbar_1_io_out_1_req_valid), .io_out_1_req_bits_eg (_xbar_1_io_out_1_req_bits_eg), .io_out_1_resp (_vrf_1_io_read_1_resp), // @[RegisterFile.scala:134:38] .io_out_2_req_ready (_vrf_2_io_read_1_req_ready), // @[RegisterFile.scala:134:38] .io_out_2_req_valid (_xbar_1_io_out_2_req_valid), .io_out_2_req_bits_eg (_xbar_1_io_out_2_req_bits_eg), .io_out_2_resp (_vrf_2_io_read_1_resp), // @[RegisterFile.scala:134:38] .io_out_3_req_ready (_vrf_3_io_read_1_req_ready), // @[RegisterFile.scala:134:38] .io_out_3_req_valid (_xbar_1_io_out_3_req_valid), .io_out_3_req_bits_eg (_xbar_1_io_out_3_req_bits_eg), .io_out_3_resp (_vrf_3_io_read_1_resp) // @[RegisterFile.scala:134:38] ); // @[RegisterFile.scala:137:22] RegisterReadXbar_2 xbar_2 ( // @[RegisterFile.scala:137:22] .clock (clock), .io_in_0_req_ready (io_read_2_0_req_ready), .io_in_0_req_valid (io_read_2_0_req_valid), .io_in_0_req_bits_eg 
(io_read_2_0_req_bits_eg), .io_in_0_req_bits_oldest (io_read_2_0_req_bits_oldest), .io_in_0_resp (io_read_2_0_resp), .io_in_1_req_ready (io_read_2_1_req_ready), .io_in_1_req_valid (io_read_2_1_req_valid), .io_in_1_req_bits_eg (io_read_2_1_req_bits_eg), .io_in_1_req_bits_oldest (io_read_2_1_req_bits_oldest), .io_in_1_resp (io_read_2_1_resp), .io_out_0_req_ready (_vrf_0_io_read_2_req_ready), // @[RegisterFile.scala:134:38] .io_out_0_req_valid (_xbar_2_io_out_0_req_valid), .io_out_0_req_bits_eg (_xbar_2_io_out_0_req_bits_eg), .io_out_0_resp (_vrf_0_io_read_2_resp), // @[RegisterFile.scala:134:38] .io_out_1_req_ready (_vrf_1_io_read_2_req_ready), // @[RegisterFile.scala:134:38] .io_out_1_req_valid (_xbar_2_io_out_1_req_valid), .io_out_1_req_bits_eg (_xbar_2_io_out_1_req_bits_eg), .io_out_1_resp (_vrf_1_io_read_2_resp), // @[RegisterFile.scala:134:38] .io_out_2_req_ready (_vrf_2_io_read_2_req_ready), // @[RegisterFile.scala:134:38] .io_out_2_req_valid (_xbar_2_io_out_2_req_valid), .io_out_2_req_bits_eg (_xbar_2_io_out_2_req_bits_eg), .io_out_2_resp (_vrf_2_io_read_2_resp), // @[RegisterFile.scala:134:38] .io_out_3_req_ready (_vrf_3_io_read_2_req_ready), // @[RegisterFile.scala:134:38] .io_out_3_req_valid (_xbar_2_io_out_3_req_valid), .io_out_3_req_bits_eg (_xbar_2_io_out_3_req_bits_eg), .io_out_3_resp (_vrf_3_io_read_2_resp) // @[RegisterFile.scala:134:38] ); // @[RegisterFile.scala:137:22] RegisterReadXbar_3 mask_xbar ( // @[RegisterFile.scala:145:27] .clock (clock), .io_in_0_req_ready (io_mask_read_0_0_req_ready), .io_in_0_req_valid (io_mask_read_0_0_req_valid), .io_in_0_req_bits_oldest (io_mask_read_0_0_req_bits_oldest), .io_in_0_resp (io_mask_read_0_0_resp), .io_in_1_req_ready (io_mask_read_0_1_req_ready), .io_in_1_req_valid (io_mask_read_0_1_req_valid), .io_in_1_req_bits_oldest (io_mask_read_0_1_req_bits_oldest), .io_in_1_resp (io_mask_read_0_1_resp), .io_in_2_req_ready (io_mask_read_0_2_req_ready), .io_in_2_req_valid (io_mask_read_0_2_req_valid), .io_in_2_req_bits_oldest (io_mask_read_0_2_req_bits_oldest), .io_in_2_resp (io_mask_read_0_2_resp), .io_in_3_req_ready (io_mask_read_0_3_req_ready), .io_in_3_req_valid (io_mask_read_0_3_req_valid), .io_in_3_req_bits_oldest (io_mask_read_0_3_req_bits_oldest), .io_in_3_resp (io_mask_read_0_3_resp), .io_in_4_req_ready (io_mask_read_0_4_req_ready), .io_in_4_req_valid (io_mask_read_0_4_req_valid), .io_in_4_req_bits_eg (io_mask_read_0_4_req_bits_eg), .io_in_4_resp (io_mask_read_0_4_resp), .io_out_0_req_ready (_vrf_0_io_mask_read_0_req_ready), // @[RegisterFile.scala:134:38] .io_out_0_req_bits_eg (_mask_xbar_io_out_0_req_bits_eg), .io_out_0_resp (_vrf_0_io_mask_read_0_resp), // @[RegisterFile.scala:134:38] .io_out_1_req_ready (_vrf_1_io_mask_read_0_req_ready), // @[RegisterFile.scala:134:38] .io_out_1_req_bits_eg (_mask_xbar_io_out_1_req_bits_eg), .io_out_1_resp (_vrf_1_io_mask_read_0_resp), // @[RegisterFile.scala:134:38] .io_out_2_req_ready (_vrf_2_io_mask_read_0_req_ready), // @[RegisterFile.scala:134:38] .io_out_2_req_bits_eg (_mask_xbar_io_out_2_req_bits_eg), .io_out_2_resp (_vrf_2_io_mask_read_0_resp), // @[RegisterFile.scala:134:38] .io_out_3_req_ready (_vrf_3_io_mask_read_0_req_ready), // @[RegisterFile.scala:134:38] .io_out_3_req_bits_eg (_mask_xbar_io_out_3_req_bits_eg), .io_out_3_resp (_vrf_3_io_mask_read_0_resp) // @[RegisterFile.scala:134:38] ); // @[RegisterFile.scala:145:27] Arbiter3_VectorWrite ll_arb ( // @[RegisterFile.scala:167:24] .io_in_0_ready (_ll_arb_io_in_0_ready), .io_in_0_valid (io_ll_writes_0_valid & 
~(|(io_ll_writes_0_bits_eg[1:0]))), // @[Bundles.scala:112:49] .io_in_0_bits_eg (ll_arb_io_in_0_bits_eg), // @[RegisterFile.scala:172:33] .io_in_0_bits_data (io_ll_writes_0_bits_data), .io_in_0_bits_mask (io_ll_writes_0_bits_mask), .io_in_1_valid (io_ll_writes_1_valid & io_ll_writes_1_bits_eg[1:0] == 2'h0), // @[Bundles.scala:112:49] .io_in_1_bits_eg (ll_arb_io_in_1_bits_eg), // @[RegisterFile.scala:172:33] .io_in_2_ready (_ll_arb_io_in_2_ready), .io_in_2_valid (io_ll_writes_2_valid & ~(|(io_ll_writes_2_bits_eg[1:0]))), // @[Bundles.scala:112:49] .io_in_2_bits_eg (ll_arb_io_in_2_bits_eg), // @[RegisterFile.scala:172:33] .io_in_2_bits_data (io_ll_writes_2_bits_data), .io_in_2_bits_mask (io_ll_writes_2_bits_mask), .io_out_ready (_vrf_0_io_ll_write_ready), // @[RegisterFile.scala:134:38] .io_out_valid (_ll_arb_io_out_valid), .io_out_bits_eg (_ll_arb_io_out_bits_eg), .io_out_bits_data (_ll_arb_io_out_bits_data), .io_out_bits_mask (_ll_arb_io_out_bits_mask) ); // @[RegisterFile.scala:167:24] Arbiter3_VectorWrite ll_arb_1 ( // @[RegisterFile.scala:167:24] .io_in_0_ready (_ll_arb_1_io_in_0_ready), .io_in_0_valid (io_ll_writes_0_valid & io_ll_writes_0_bits_eg[1:0] == 2'h1), // @[Bundles.scala:112:49] .io_in_0_bits_eg (ll_arb_io_in_0_bits_eg), // @[RegisterFile.scala:172:33] .io_in_0_bits_data (io_ll_writes_0_bits_data), .io_in_0_bits_mask (io_ll_writes_0_bits_mask), .io_in_1_valid (io_ll_writes_1_valid & io_ll_writes_1_bits_eg[1:0] == 2'h1), // @[Bundles.scala:112:49] .io_in_1_bits_eg (ll_arb_io_in_1_bits_eg), // @[RegisterFile.scala:172:33] .io_in_2_ready (_ll_arb_1_io_in_2_ready), .io_in_2_valid (io_ll_writes_2_valid & io_ll_writes_2_bits_eg[1:0] == 2'h1), // @[Bundles.scala:112:49] .io_in_2_bits_eg (ll_arb_io_in_2_bits_eg), // @[RegisterFile.scala:172:33] .io_in_2_bits_data (io_ll_writes_2_bits_data), .io_in_2_bits_mask (io_ll_writes_2_bits_mask), .io_out_ready (_vrf_1_io_ll_write_ready), // @[RegisterFile.scala:134:38] .io_out_valid (_ll_arb_1_io_out_valid), .io_out_bits_eg (_ll_arb_1_io_out_bits_eg), .io_out_bits_data (_ll_arb_1_io_out_bits_data), .io_out_bits_mask (_ll_arb_1_io_out_bits_mask) ); // @[RegisterFile.scala:167:24] Arbiter3_VectorWrite ll_arb_2 ( // @[RegisterFile.scala:167:24] .io_in_0_ready (_ll_arb_2_io_in_0_ready), .io_in_0_valid (io_ll_writes_0_valid & io_ll_writes_0_bits_eg[1:0] == 2'h2), // @[Bundles.scala:112:49] .io_in_0_bits_eg (ll_arb_io_in_0_bits_eg), // @[RegisterFile.scala:172:33] .io_in_0_bits_data (io_ll_writes_0_bits_data), .io_in_0_bits_mask (io_ll_writes_0_bits_mask), .io_in_1_valid (io_ll_writes_1_valid & io_ll_writes_1_bits_eg[1:0] == 2'h2), // @[Bundles.scala:112:49] .io_in_1_bits_eg (ll_arb_io_in_1_bits_eg), // @[RegisterFile.scala:172:33] .io_in_2_ready (_ll_arb_2_io_in_2_ready), .io_in_2_valid (io_ll_writes_2_valid & io_ll_writes_2_bits_eg[1:0] == 2'h2), // @[Bundles.scala:112:49] .io_in_2_bits_eg (ll_arb_io_in_2_bits_eg), // @[RegisterFile.scala:172:33] .io_in_2_bits_data (io_ll_writes_2_bits_data), .io_in_2_bits_mask (io_ll_writes_2_bits_mask), .io_out_ready (_vrf_2_io_ll_write_ready), // @[RegisterFile.scala:134:38] .io_out_valid (_ll_arb_2_io_out_valid), .io_out_bits_eg (_ll_arb_2_io_out_bits_eg), .io_out_bits_data (_ll_arb_2_io_out_bits_data), .io_out_bits_mask (_ll_arb_2_io_out_bits_mask) ); // @[RegisterFile.scala:167:24] Arbiter3_VectorWrite ll_arb_3 ( // @[RegisterFile.scala:167:24] .io_in_0_ready (_ll_arb_3_io_in_0_ready), .io_in_0_valid (io_ll_writes_0_valid & (&(io_ll_writes_0_bits_eg[1:0]))), // @[Bundles.scala:112:49] .io_in_0_bits_eg 
(ll_arb_io_in_0_bits_eg), // @[RegisterFile.scala:172:33] .io_in_0_bits_data (io_ll_writes_0_bits_data), .io_in_0_bits_mask (io_ll_writes_0_bits_mask), .io_in_1_valid (io_ll_writes_1_valid & (&(io_ll_writes_1_bits_eg[1:0]))), // @[Bundles.scala:112:49] .io_in_1_bits_eg (ll_arb_io_in_1_bits_eg), // @[RegisterFile.scala:172:33] .io_in_2_ready (_ll_arb_3_io_in_2_ready), .io_in_2_valid (io_ll_writes_2_valid & (&(io_ll_writes_2_bits_eg[1:0]))), // @[Bundles.scala:112:49] .io_in_2_bits_eg (ll_arb_io_in_2_bits_eg), // @[RegisterFile.scala:172:33] .io_in_2_bits_data (io_ll_writes_2_bits_data), .io_in_2_bits_mask (io_ll_writes_2_bits_mask), .io_out_ready (_vrf_3_io_ll_write_ready), // @[RegisterFile.scala:134:38] .io_out_valid (_ll_arb_3_io_out_valid), .io_out_bits_eg (_ll_arb_3_io_out_bits_eg), .io_out_bits_data (_ll_arb_3_io_out_bits_data), .io_out_bits_mask (_ll_arb_3_io_out_bits_mask) ); // @[RegisterFile.scala:167:24] assign io_ll_writes_0_ready = _ll_arb_3_io_in_0_ready & (&(io_ll_writes_0_bits_eg[1:0])) | _ll_arb_2_io_in_0_ready & io_ll_writes_0_bits_eg[1:0] == 2'h2 | _ll_arb_1_io_in_0_ready & io_ll_writes_0_bits_eg[1:0] == 2'h1 | _ll_arb_io_in_0_ready & ~(|(io_ll_writes_0_bits_eg[1:0])); // @[Bundles.scala:112:49] assign io_ll_writes_2_ready = _ll_arb_3_io_in_2_ready & (&(io_ll_writes_2_bits_eg[1:0])) | _ll_arb_2_io_in_2_ready & io_ll_writes_2_bits_eg[1:0] == 2'h2 | _ll_arb_1_io_in_2_ready & io_ll_writes_2_bits_eg[1:0] == 2'h1 | _ll_arb_io_in_2_ready & ~(|(io_ll_writes_2_bits_eg[1:0])); // @[Bundles.scala:112:49] endmodule
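The RegisterFile above is elaborated with the four-bank VRF configuration, so the low two bits of each element-group index pick the bank (the `io_..._bits_eg[1:0]` comparisons and the `&(...)` reduction for bank 3), while `eg >> vrfBankBits` (here `eg[4:2]`, zero-extended back to five bits) addresses the row inside the selected RegisterFileBank. A small illustrative helper follows; `VrfAddressing` is an assumption for exposition, not part of the saturn sources.

// Illustrative sketch of the bank/row split used by the generated RegisterFile (assumed helper).
object VrfAddressing {
  val nBanks      = 4
  val vrfBankBits = 2                 // log2 of nBanks
  def splitEg(eg: Int): (Int, Int) = {
    val bankId = eg & (nBanks - 1)    // eg[1:0]: selects which RegisterFileBank is accessed
    val rowId  = eg >> vrfBankBits    // eg[4:2]: row index presented on the bank's eg port
    (bankId, rowId)
  }
}
// Example: splitEg(13) == (1, 3), so a write with eg = 13 lands in bank 1, row 3.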
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
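The PE above never names SInt or Float directly; each arithmetic step resolves through the Arithmetic typeclass defined in this file. A minimal sketch of how generic code picks up one of these instances (the MacExample module and its parameter names are hypothetical, not part of Gemmini):

import chisel3._
import gemmini._

// Hypothetical example: out = a * b + acc, routed through Arithmetic exactly as PE does via "import ev._".
class MacExample[T <: Data](inT: T, accT: T)(implicit ev: Arithmetic[T]) extends Module {
  import ev._
  val io = IO(new Bundle {
    val a   = Input(inT)
    val b   = Input(inT)
    val acc = Input(accT)
    val out = Output(accT)
  })
  // mac returns m1 * m2 + self; withWidthOf trims or sign-extends the result back to accT's width
  io.out := io.acc.mac(io.a, io.b).withWidthOf(accT)
}

// new MacExample(SInt(8.W), SInt(32.W)) resolves SIntArithmetic from Arithmetic's companion object;
// new MacExample(Float(8, 24), Float(8, 24)) would use FloatArithmetic instead.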
module PE_483( // @[PE.scala:31:7] input clock, // @[PE.scala:31:7] input reset, // @[PE.scala:31:7] input [7:0] io_in_a, // @[PE.scala:35:14] input [19:0] io_in_b, // @[PE.scala:35:14] input [19:0] io_in_d, // @[PE.scala:35:14] output [7:0] io_out_a, // @[PE.scala:35:14] output [19:0] io_out_b, // @[PE.scala:35:14] output [19:0] io_out_c, // @[PE.scala:35:14] input io_in_control_dataflow, // @[PE.scala:35:14] input io_in_control_propagate, // @[PE.scala:35:14] input [4:0] io_in_control_shift, // @[PE.scala:35:14] output io_out_control_dataflow, // @[PE.scala:35:14] output io_out_control_propagate, // @[PE.scala:35:14] output [4:0] io_out_control_shift, // @[PE.scala:35:14] input [2:0] io_in_id, // @[PE.scala:35:14] output [2:0] io_out_id, // @[PE.scala:35:14] input io_in_last, // @[PE.scala:35:14] output io_out_last, // @[PE.scala:35:14] input io_in_valid, // @[PE.scala:35:14] output io_out_valid, // @[PE.scala:35:14] output io_bad_dataflow // @[PE.scala:35:14] ); wire [19:0] _mac_unit_io_out_d; // @[PE.scala:64:24] wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7] wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7] wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7] wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7] wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7] wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7] wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7] wire io_in_last_0 = io_in_last; // @[PE.scala:31:7] wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7] wire io_bad_dataflow_0 = 1'h0; // @[PE.scala:31:7] wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7] wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37] wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37] wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35] wire [19:0] c1_lo_1 = io_in_d_0; // @[PE.scala:31:7] wire [19:0] c2_lo_1 = io_in_d_0; // @[PE.scala:31:7] wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7] wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7] wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7] wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7] wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7] wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7] wire [19:0] io_out_b_0; // @[PE.scala:31:7] wire [19:0] io_out_c_0; // @[PE.scala:31:7] reg [31:0] c1; // @[PE.scala:70:15] wire [31:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15] wire [31:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38] reg [31:0] c2; // @[PE.scala:71:15] wire [31:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15] wire [31:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38] reg last_s; // @[PE.scala:89:25] wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21] wire [4:0] shift_offset = flip ? 
io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25] wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25] wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32] wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32] wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25] wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53] wire [31:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15] wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}] wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25] wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27] wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27] wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_7 = _io_out_c_zeros_T_1 & _io_out_c_zeros_T_6; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 
32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}] wire [31:0] _GEN_2 = {27'h0, shift_offset}; // @[PE.scala:91:25] wire [31:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15] wire [31:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30] wire [31:0] _io_out_c_T; // @[Arithmetic.scala:107:15] assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33] wire [32:0] _io_out_c_T_2 = {_io_out_c_T[31], _io_out_c_T} + {{31{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}] wire [31:0] _io_out_c_T_3 = _io_out_c_T_2[31:0]; // @[Arithmetic.scala:107:28] wire [31:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28] wire _io_out_c_T_5 = $signed(_io_out_c_T_4) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33] wire _io_out_c_T_6 = $signed(_io_out_c_T_4) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60] wire [31:0] _io_out_c_T_7 = _io_out_c_T_6 ? 32'hFFF80000 : _io_out_c_T_4; // @[Mux.scala:126:16] wire [31:0] _io_out_c_T_8 = _io_out_c_T_5 ? 32'h7FFFF : _io_out_c_T_7; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_9 = _io_out_c_T_8[19:0]; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37] wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37] wire c1_sign = io_in_d_0[19]; // @[PE.scala:31:7] wire c2_sign = io_in_d_0[19]; // @[PE.scala:31:7] wire [1:0] _GEN_4 = {2{c1_sign}}; // @[Arithmetic.scala:117:26, :118:18] wire [1:0] c1_lo_lo_hi; // @[Arithmetic.scala:118:18] assign c1_lo_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18] wire [1:0] c1_lo_hi_hi; // @[Arithmetic.scala:118:18] assign c1_lo_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18] wire [1:0] c1_hi_lo_hi; // @[Arithmetic.scala:118:18] assign c1_hi_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18] wire [1:0] c1_hi_hi_hi; // @[Arithmetic.scala:118:18] assign c1_hi_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18] wire [2:0] c1_lo_lo = {c1_lo_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [2:0] c1_lo_hi = {c1_lo_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [5:0] c1_lo = {c1_lo_hi, c1_lo_lo}; // @[Arithmetic.scala:118:18] wire [2:0] c1_hi_lo = {c1_hi_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [2:0] c1_hi_hi = {c1_hi_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [5:0] c1_hi = {c1_hi_hi, c1_hi_lo}; // @[Arithmetic.scala:118:18] wire [11:0] _c1_T = {c1_hi, c1_lo}; // @[Arithmetic.scala:118:18] wire [31:0] _c1_T_1 = {_c1_T, c1_lo_1}; // @[Arithmetic.scala:118:{14,18}] wire [31:0] _c1_T_2 = _c1_T_1; // @[Arithmetic.scala:118:{14,61}] wire [31:0] _c1_WIRE = _c1_T_2; // @[Arithmetic.scala:118:61] wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53] wire [31:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); 
// @[PE.scala:71:15] wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}] wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_16 = _io_out_c_zeros_T_10 & _io_out_c_zeros_T_15; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}] wire [31:0] _GEN_5 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15] wire [31:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T_1 = _GEN_5; // @[Arithmetic.scala:103:30] wire [31:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15] assign _io_out_c_T_11 = _GEN_5; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33] wire [32:0] _io_out_c_T_13 = {_io_out_c_T_11[31], _io_out_c_T_11} + {{31{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}] wire [31:0] _io_out_c_T_14 = _io_out_c_T_13[31:0]; // @[Arithmetic.scala:107:28] wire [31:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28] wire _io_out_c_T_16 = $signed(_io_out_c_T_15) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33] wire _io_out_c_T_17 = $signed(_io_out_c_T_15) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60] wire [31:0] _io_out_c_T_18 = _io_out_c_T_17 ? 32'hFFF80000 : _io_out_c_T_15; // @[Mux.scala:126:16] wire [31:0] _io_out_c_T_19 = _io_out_c_T_16 ? 
32'h7FFFF : _io_out_c_T_18; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_20 = _io_out_c_T_19[19:0]; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37] wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37] wire [1:0] _GEN_6 = {2{c2_sign}}; // @[Arithmetic.scala:117:26, :118:18] wire [1:0] c2_lo_lo_hi; // @[Arithmetic.scala:118:18] assign c2_lo_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18] wire [1:0] c2_lo_hi_hi; // @[Arithmetic.scala:118:18] assign c2_lo_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18] wire [1:0] c2_hi_lo_hi; // @[Arithmetic.scala:118:18] assign c2_hi_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18] wire [1:0] c2_hi_hi_hi; // @[Arithmetic.scala:118:18] assign c2_hi_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18] wire [2:0] c2_lo_lo = {c2_lo_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [2:0] c2_lo_hi = {c2_lo_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [5:0] c2_lo = {c2_lo_hi, c2_lo_lo}; // @[Arithmetic.scala:118:18] wire [2:0] c2_hi_lo = {c2_hi_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [2:0] c2_hi_hi = {c2_hi_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [5:0] c2_hi = {c2_hi_hi, c2_hi_lo}; // @[Arithmetic.scala:118:18] wire [11:0] _c2_T = {c2_hi, c2_lo}; // @[Arithmetic.scala:118:18] wire [31:0] _c2_T_1 = {_c2_T, c2_lo_1}; // @[Arithmetic.scala:118:{14,18}] wire [31:0] _c2_T_2 = _c2_T_1; // @[Arithmetic.scala:118:{14,61}] wire [31:0] _c2_WIRE = _c2_T_2; // @[Arithmetic.scala:118:61] wire [31:0] _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38] wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5[7:0]; // @[PE.scala:121:38] wire [31:0] _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38] wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7[7:0]; // @[PE.scala:127:38] assign io_out_c_0 = io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? c1[19:0] : c2[19:0]) : io_in_control_propagate_0 ? _io_out_c_T_10 : _io_out_c_T_21; // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :104:16, :111:16, :118:101, :119:30, :120:16, :126:16] assign io_out_b_0 = io_in_control_dataflow_0 ? _mac_unit_io_out_d : io_in_b_0; // @[PE.scala:31:7, :64:24, :102:95, :103:30, :118:101] wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35] wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35] wire [31:0] _GEN_7 = {{12{io_in_d_0[19]}}, io_in_d_0}; // @[PE.scala:31:7, :124:10] wire [31:0] _GEN_8 = {{12{_mac_unit_io_out_d[19]}}, _mac_unit_io_out_d}; // @[PE.scala:64:24, :108:10] always @(posedge clock) begin // @[PE.scala:31:7] if (io_in_valid_0) begin // @[PE.scala:31:7] if (io_in_control_dataflow_0) begin // @[PE.scala:31:7] if (io_in_control_dataflow_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :70:15, :118:101, :119:30, :124:10] c1 <= _GEN_7; // @[PE.scala:70:15, :124:10] if (~io_in_control_dataflow_0 | io_in_control_propagate_0) begin // @[PE.scala:31:7, :71:15, :118:101, :119:30] end else // @[PE.scala:71:15, :118:101, :119:30] c2 <= _GEN_7; // @[PE.scala:71:15, :124:10] end else begin // @[PE.scala:31:7] c1 <= io_in_control_propagate_0 ? _c1_WIRE : _GEN_8; // @[PE.scala:31:7, :70:15, :103:30, :108:10, :109:10, :115:10] c2 <= io_in_control_propagate_0 ? 
_GEN_8 : _c2_WIRE; // @[PE.scala:31:7, :71:15, :103:30, :108:10, :116:10] end last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25] end end // always @(posedge) MacUnit_227 mac_unit ( // @[PE.scala:64:24] .clock (clock), .reset (reset), .io_in_a (io_in_a_0), // @[PE.scala:31:7] .io_in_b (io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3) : io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE : _mac_unit_io_in_b_WIRE_1), // @[PE.scala:31:7, :102:95, :103:30, :106:{24,37}, :113:{24,37}, :118:101, :119:30, :121:{24,38}, :127:{24,38}] .io_in_c (io_in_control_dataflow_0 ? {{12{io_in_b_0[19]}}, io_in_b_0} : io_in_control_propagate_0 ? c2 : c1), // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :107:24, :114:24, :118:101, :122:24] .io_out_d (_mac_unit_io_out_d) ); // @[PE.scala:64:24] assign io_out_a = io_out_a_0; // @[PE.scala:31:7] assign io_out_b = io_out_b_0; // @[PE.scala:31:7] assign io_out_c = io_out_c_0; // @[PE.scala:31:7] assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7] assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7] assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7] assign io_out_id = io_out_id_0; // @[PE.scala:31:7] assign io_out_last = io_out_last_0; // @[PE.scala:31:7] assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7] assign io_bad_dataflow = io_bad_dataflow_0; // @[PE.scala:31:7] endmodule
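The module above is one concrete elaboration of the generic PE: 8-bit SInt inputs, 20-bit outputs, a 32-bit accumulator (hence the 5-bit shift field) and a 3-bit id. A minimal sketch of a driver that would emit a comparable module (the EmitPE object, the exact type parameters and the ChiselStage entry point are assumptions inferred from the port widths, not taken from the original build):

import chisel3._
import gemmini._

// Hypothetical elaboration driver; assumes the circt.stage.ChiselStage API of recent Chisel releases.
object EmitPE extends App {
  println(circt.stage.ChiselStage.emitSystemVerilog(
    // inputType, outputType, accType, dataflow, max_simultaneous_matmuls (3-bit id => up to 8 in flight)
    new PE(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.BOTH, max_simultaneous_matmuls = 8)
  ))
}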
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
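// Illustrative reading of the visibility check above, with hypothetical parameters (not from any real config):
// suppose one client owns sourceId 0-3 but its visibility covers only 0x8000_0000-0x8FFF_FFFF, while a second
// client owns sourceId 4-7 with full visibility. An 'A' beat from source 2 to address 0x3000 makes visible(...)
// false (the owning client cannot see that address) and trips the monAssert, whereas the same address from
// source 5 passes, since every client either does not own source 5 or can see 0x3000.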
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
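// Bookkeeping note on the in-flight tracking above: each source ID owns a fixed-width slot
// (1 << log_a_opcode_bus_size bits) of inflight_opcodes and a (1 << log_a_size_bus_size)-bit slot
// of inflight_sizes. An accepted 'A' request writes ((opcode << 1) | 1) and ((size << 1) | 1) into
// its slot, the low bit marking the slot as occupied; a_opcode_lookup / a_size_lookup shift the
// slot back down and drop that bit when the matching 'D' beat is checked, and d_opcodes_clr /
// d_sizes_clr zero the slot once the response fires. Illustrative example (values assumed, not
// taken from any particular design): with log_a_opcode_bus_size = 2, a Get (opcode 4) from
// source 2 places 0b1001 into bits [11:8] of inflight_opcodes.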
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // A potentially empty inclusive range of 2-powers
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be non-negative, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask gives the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
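// Worked example (illustrative values): with base=0x1000, mask=0xf0f, contains(0x1104) holds
// because ((0x1104 ^ 0x1000) & ~0xf0f) == 0, while contains(0x1010) fails because bit 4 of the
// address differs from the base but is not covered by the mask. alignment below evaluates to
// ((0xf0f + 1) & ~0xf0f) = 0x10, i.e. each fragment of this set is 16 bytes.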
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
        val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
        // Do there exist A messages without Data?
        val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
      }
      case _:TLBundleB => {
        // Do there exist B messages with Data?
        val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
        // Do there exist B messages without Data?
        val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
      }
      case _:TLBundleC => {
        // Do there exist C messages with Data?
        val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
        // Do there exist C messages without Data?
        val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
        if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
      }
      case _:TLBundleD => {
        // Do there exist D messages with Data?
        val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
        // Do there exist D messages without Data?
        val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
        if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
      }
      case _:TLBundleE => Some(false)
    }
  }

  def isRequest(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => true.B
      case b: TLBundleB => true.B
      case c: TLBundleC => c.opcode(2) && c.opcode(1)
        // opcode === TLMessages.Release ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(2) && !d.opcode(1)
        // opcode === TLMessages.Grant ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
  }

  def isResponse(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => false.B
      case b: TLBundleB => false.B
      case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
        // opcode =/= TLMessages.Release &&
        // opcode =/= TLMessages.ReleaseData
      case d: TLBundleD => true.B // Grant isResponse + isRequest
      case e: TLBundleE => true.B
    }
  }

  def hasData(x: TLChannel): Bool = {
    val opdata = x match {
      case a: TLBundleA => !a.opcode(2)
        // opcode === TLMessages.PutFullData ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case b: TLBundleB => !b.opcode(2)
        // opcode === TLMessages.PutFullData ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case c: TLBundleC => c.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.ProbeAckData ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
    staticHasData(x).map(_.B).getOrElse(opdata)
  }

  def opcode(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.opcode
      case b: TLBundleB => b.opcode
      case c: TLBundleC => c.opcode
      case d: TLBundleD => d.opcode
    }
  }

  def param(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.param
      case b: TLBundleB => b.param
      case c: TLBundleC => c.param
      case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
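A minimal client-side usage sketch (an editor's illustration, not part of the original Edges.scala): the names `edge` (a TLEdgeOut), `tl` (a TLBundle), `addr`, and `wantRead` are assumed to exist in the surrounding module. It uses only helpers defined above: Get returns a legality Bool derived from manager.supportsGetFast together with the channel-A payload, and firstlast tracks the beats of the D-channel burst.

  // Hypothetical snippet: drive channel A with a Get and watch the D response.
  val (getLegal, getBits) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
  tl.a.valid := wantRead && getLegal
  tl.a.bits  := getBits
  tl.d.ready := true.B
  val (d_first, d_last, d_done) = edge.firstlast(tl.d) // first/last/done beat flags from firstlastHelper

Here getLegal is low when no visible manager supports a Get of that size at that address, mirroring the require() messages in the helpers above.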
module TLMonitor_19( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire 
_c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 
1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59] wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14] wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27] wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25] wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_55 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_61 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_63 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_67 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_69 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_73 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_79 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_87 = 1'h1; // @[Parameters.scala:56:32] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire 
[8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28] wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire 
[28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] 
_c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] 
_c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57] wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57] wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] 
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [1027:0] _c_sizes_set_T_1 = 1028'h0; // @[Monitor.scala:768:52] wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79] wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77] wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54] wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59] wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40] wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35] wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35] wire [519:0] c_sizes_set = 520'h0; // @[Monitor.scala:741:34] wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34] wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34] wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34] wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117] wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48] wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119] wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48] wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // 
@[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] 
_uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_55 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_56 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_57 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_58 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_59 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_60 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_61 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_62 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_63 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_64 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_65 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_25 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_33 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = 
_source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_26 = _source_ok_T_25 == 5'hA; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_29 = source_ok_uncommonBits_4 != 2'h3; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_30 = _source_ok_T_28 & _source_ok_T_29; // @[Parameters.scala:54:67, :56:48, :57:20] wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31] wire _source_ok_T_31 = io_in_a_bits_source_0 == 7'h2B; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_31; // @[Parameters.scala:1138:31] wire _source_ok_T_32 = io_in_a_bits_source_0 == 7'h2C; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_32; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_34 = _source_ok_T_33 == 5'h8; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_37 = source_ok_uncommonBits_5 != 2'h3; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_38 = _source_ok_T_36 & _source_ok_T_37; // @[Parameters.scala:54:67, :56:48, :57:20] wire _source_ok_WIRE_8 = _source_ok_T_38; // @[Parameters.scala:1138:31] wire _source_ok_T_39 = io_in_a_bits_source_0 == 7'h23; // @[Monitor.scala:36:7] wire _source_ok_WIRE_9 = _source_ok_T_39; // @[Parameters.scala:1138:31] wire _source_ok_T_40 = io_in_a_bits_source_0 == 7'h24; // @[Monitor.scala:36:7] wire _source_ok_WIRE_10 = _source_ok_T_40; // @[Parameters.scala:1138:31] wire _source_ok_T_41 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_11 = _source_ok_T_41; // @[Parameters.scala:1138:31] wire _source_ok_T_42 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_48 = _source_ok_T_47 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_49 = _source_ok_T_48 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_50 = _source_ok_T_49 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_51 = 
_source_ok_T_50 | _source_ok_WIRE_10; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_51 | _source_ok_WIRE_11; // @[Parameters.scala:1138:31, :1139:46] wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71] wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [28:0] _is_aligned_T = {17'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; 
// @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] 
uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_51 = 
_uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_54 = _uncommonBits_T_54[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_55 = _uncommonBits_T_55[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_56 = _uncommonBits_T_56[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_57 = _uncommonBits_T_57[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_58 = _uncommonBits_T_58[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_59 = _uncommonBits_T_59[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_60 = _uncommonBits_T_60[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_61 = _uncommonBits_T_61[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_62 = _uncommonBits_T_62[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_63 = _uncommonBits_T_63[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_64 = _uncommonBits_T_64[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_65 = _uncommonBits_T_65[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_52 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_52; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_53 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_59 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_65 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_71 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_77 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_85 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_54 = _source_ok_T_53 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_56 = _source_ok_T_54; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_58; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_60 = _source_ok_T_59 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_62 = _source_ok_T_60; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_64 = _source_ok_T_62; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_64; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_66 = _source_ok_T_65 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_68 = _source_ok_T_66; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_70 = _source_ok_T_68; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_70; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_72 = _source_ok_T_71 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_74 = _source_ok_T_72; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = 
_source_ok_T_76; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_78 = _source_ok_T_77 == 5'hA; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_80 = _source_ok_T_78; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_81 = source_ok_uncommonBits_10 != 2'h3; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_82 = _source_ok_T_80 & _source_ok_T_81; // @[Parameters.scala:54:67, :56:48, :57:20] wire _source_ok_WIRE_1_5 = _source_ok_T_82; // @[Parameters.scala:1138:31] wire _source_ok_T_83 = io_in_d_bits_source_0 == 7'h2B; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_83; // @[Parameters.scala:1138:31] wire _source_ok_T_84 = io_in_d_bits_source_0 == 7'h2C; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_84; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_86 = _source_ok_T_85 == 5'h8; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_88 = _source_ok_T_86; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_89 = source_ok_uncommonBits_11 != 2'h3; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_90 = _source_ok_T_88 & _source_ok_T_89; // @[Parameters.scala:54:67, :56:48, :57:20] wire _source_ok_WIRE_1_8 = _source_ok_T_90; // @[Parameters.scala:1138:31] wire _source_ok_T_91 = io_in_d_bits_source_0 == 7'h23; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_9 = _source_ok_T_91; // @[Parameters.scala:1138:31] wire _source_ok_T_92 = io_in_d_bits_source_0 == 7'h24; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_10 = _source_ok_T_92; // @[Parameters.scala:1138:31] wire _source_ok_T_93 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_11 = _source_ok_T_93; // @[Parameters.scala:1138:31] wire _source_ok_T_94 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_95 = _source_ok_T_94 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_96 = _source_ok_T_95 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_97 = _source_ok_T_96 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_98 = _source_ok_T_97 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_99 = _source_ok_T_98 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_100 = _source_ok_T_99 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_101 = _source_ok_T_100 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_102 = _source_ok_T_101 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_103 = _source_ok_T_102 | _source_ok_WIRE_1_10; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_103 | _source_ok_WIRE_1_11; // @[Parameters.scala:1138:31, :1139:46] wire _T_1760 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1760; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1760; // @[Decoupled.scala:51:35] wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] 
a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] a_first_counter; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [3:0] size; // @[Monitor.scala:389:22] reg [6:0] source; // @[Monitor.scala:390:22] reg [28:0] address; // @[Monitor.scala:391:22] wire _T_1833 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1833; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1833; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1833; // @[Decoupled.scala:51:35] wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [8:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg [6:0] source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [64:0] inflight; // @[Monitor.scala:614:27] reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [519:0] inflight_sizes; // @[Monitor.scala:618:33] wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] a_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [64:0] a_set; // @[Monitor.scala:626:34] wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [519:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [7:0] a_size_lookup; // @[Monitor.scala:639:33] wire [9:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65] wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65] wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99] wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67] wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99] wire [519:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [519:0] _a_size_lookup_T_6 = {512'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}] wire [519:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[519:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = 
io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [127:0] _GEN_3 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_3; // @[OneHot.scala:58:35] wire [127:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_3; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1686 = _T_1760 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1686 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1686 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1686 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [9:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1686 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [9:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77] wire [1027:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1686 ? _a_sizes_set_T_1[519:0] : 520'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [64:0] d_clr; // @[Monitor.scala:664:34] wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [519:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_1732 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1732 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1701 = _T_1833 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1701 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1701 ? 
_d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [1038:0] _d_sizes_clr_T_5 = 1039'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1701 ? _d_sizes_clr_T_5[519:0] : 520'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [519:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [519:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [519:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [64:0] inflight_1; // @[Monitor.scala:726:35] wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [519:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [519:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_2; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [7:0] c_size_lookup; // @[Monitor.scala:748:35] wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [519:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [519:0] _c_size_lookup_T_6 = {512'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}] wire [519:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[519:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [64:0] d_clr_1; // @[Monitor.scala:774:34] wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [519:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1804 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1804 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1786 = _T_1833 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1786 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1786 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [1038:0] _d_sizes_clr_T_11 = 1039'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1786 ? _d_sizes_clr_T_11[519:0] : 520'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113] wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [519:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [519:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_130( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_140 io_out_sink_extend ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
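For context on the AsyncQueue sources above, here is a minimal usage sketch (not part of the generated collateral, and all module/signal names here are illustrative assumptions) showing how ToAsyncBundle and FromAsyncBundle are typically wired to carry a Decoupled stream between two clock domains:

import chisel3._
import chisel3.util._
import freechips.rocketchip.util._

// Hypothetical example module: a 16-bit Decoupled stream enters in this
// module's implicit clock domain and is handed to a second clock domain
// through the AsyncBundle produced by ToAsyncBundle / consumed by
// FromAsyncBundle.
class ExampleCrossing extends Module {
  val io = IO(new Bundle {
    val in        = Flipped(Decoupled(UInt(16.W)))
    val out_clock = Input(Clock())
    val out_reset = Input(Bool())
    val out       = Decoupled(UInt(16.W))
  })
  // Source half of the queue lives under the implicit clock/reset.
  val async = ToAsyncBundle(io.in, AsyncQueueParams(depth = 8, sync = 3))
  // Sink half is elaborated under the destination clock/reset.
  withClockAndReset(io.out_clock, io.out_reset) {
    io.out <> FromAsyncBundle(async)
  }
}

The AsyncQueue class shown earlier packages the same source/sink pair behind a single CrossingIO when both clock domains are available at one place in the hierarchy.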
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_107( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_179 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
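As a usage note for the ShiftReg/SynchronizerReg sources above, the following is a minimal sketch (module and register names are assumptions for illustration) of how AsyncResetSynchronizerShiftReg is applied to bring an asynchronous 1-bit input into the local clock domain, which is the kind of call that elaborates modules like the one just generated:

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Hypothetical example: 3-deep synchronizer whose flops are asynchronously
// reset to 0, as described in the sources above.
class SyncInput extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0,
                                                name = Some("in_sync"))
}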
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_236( // @[SynchronizerReg.scala:68:19]
  input  clock, // @[SynchronizerReg.scala:68:19]
  input  reset, // @[SynchronizerReg.scala:68:19]
  input  io_d, // @[ShiftReg.scala:36:14]
  output io_q // @[ShiftReg.scala:36:14]
);

  wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
  wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
  wire io_q_0; // @[SynchronizerReg.scala:68:19]
  reg  sync_0; // @[SynchronizerReg.scala:51:87]
  assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
  reg  sync_1; // @[SynchronizerReg.scala:51:87]
  reg  sync_2; // @[SynchronizerReg.scala:51:87]
  always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
    if (reset) begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
    end
    else begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
      sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
      sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
    end
  end // @[SynchronizerReg.scala:68:19]
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
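For clarity, the following behavioral sketch (an assumption for illustration, not the generated module itself) captures what the 3-deep async-reset primitive above implements: io_d is sampled into sync_2 and shifts toward sync_0, which drives io_q, so a change on the input takes three cycles to reach the output.

import chisel3._

// Hypothetical behavioral equivalent of the d3_i0 primitive above.
class PrimitiveShiftRegSketch extends Module with RequireAsyncReset {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  val sync_0 = RegInit(false.B)
  val sync_1 = RegInit(false.B)
  val sync_2 = RegInit(false.B)
  sync_2 := io.d     // newest sample enters at the tail of the chain
  sync_1 := sync_2
  sync_0 := sync_1   // oldest sample drives the output
  io.q := sync_0
}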
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_27( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_27 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
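The same SynchronizerReg.scala source also provides ClockCrossingReg, the single-deep enabled flop used for deq_bits_reg in AsyncQueueSink earlier. A minimal usage sketch (names are assumptions) is:

import chisel3._
import freechips.rocketchip.util.ClockCrossingReg

// Hypothetical example: latch an 8-bit value with one enabled flop in the
// receiving clock domain, without an initial value (doInit = false).
class CaptureWord extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(8.W))
    val en = Input(Bool())
    val q  = Output(UInt(8.W))
  })
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = false, name = Some("capture_reg"))
}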
Generate the Verilog code corresponding to the following Chisel files. File RecFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import consts._ class RecFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val in = Input(Bits((inExpWidth + inSigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in); if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- io.out := io.in<<(outSigWidth - inSigWidth) io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W) } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( inExpWidth, inSigWidth, outExpWidth, outSigWidth, flRoundOpt_sigMSBitAlwaysZero )) roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn) roundAnyRawFNToRecFN.io.infiniteExc := false.B roundAnyRawFNToRecFN.io.in := rawIn roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags } } File rawFloatFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ /*---------------------------------------------------------------------------- | In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be | set. *----------------------------------------------------------------------------*/ object rawFloatFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat = { val exp = in(expWidth + sigWidth - 1, sigWidth - 1) val isZero = exp(expWidth, expWidth - 2) === 0.U val isSpecial = exp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && exp(expWidth - 2) out.isInf := isSpecial && ! exp(expWidth - 2) out.isZero := isZero out.sign := in(expWidth + sigWidth) out.sExp := exp.zext out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0) out } }
module RecFNToRecFN_20( // @[RecFNToRecFN.scala:44:5] input [64:0] io_in, // @[RecFNToRecFN.scala:48:16] input [2:0] io_roundingMode, // @[RecFNToRecFN.scala:48:16] output [32:0] io_out, // @[RecFNToRecFN.scala:48:16] output [4:0] io_exceptionFlags // @[RecFNToRecFN.scala:48:16] ); wire rawIn_isNaN = (&(io_in[63:62])) & io_in[61]; // @[rawFloatFromRecFN.scala:51:21, :53:{28,53}, :56:{33,41}] RoundAnyRawFNToRecFN_ie11_is53_oe8_os24 roundAnyRawFNToRecFN ( // @[RecFNToRecFN.scala:72:19] .io_invalidExc (rawIn_isNaN & ~(io_in[51])), // @[rawFloatFromRecFN.scala:56:33] .io_in_isNaN (rawIn_isNaN), // @[rawFloatFromRecFN.scala:56:33] .io_in_isInf ((&(io_in[63:62])) & ~(io_in[61])), // @[rawFloatFromRecFN.scala:51:21, :53:{28,53}, :56:41, :57:{33,36}] .io_in_isZero (~(|(io_in[63:61]))), // @[rawFloatFromRecFN.scala:51:21, :52:{28,53}] .io_in_sign (io_in[64]), // @[rawFloatFromRecFN.scala:59:25] .io_in_sExp ({1'h0, io_in[63:52]}), // @[rawFloatFromRecFN.scala:51:21, :60:27] .io_in_sig ({1'h0, |(io_in[63:61]), io_in[51:0]}), // @[rawFloatFromRecFN.scala:51:21, :52:{28,53}, :61:{44,49}] .io_roundingMode (io_roundingMode), .io_out (io_out), .io_exceptionFlags (io_exceptionFlags) ); // @[RecFNToRecFN.scala:72:19] endmodule
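To relate the RecFNToRecFN source to the generated instance above (65-bit input, 33-bit output, i.e. recoded double to recoded single), here is a hedged wrapper sketch; the module name, constant rounding mode, and tininess setting are assumptions for illustration, since the generated instance takes its rounding mode from a port:

import chisel3._
import hardfloat._

// Hypothetical wrapper: narrow a recoded f64 (65 bits) to recoded f32
// (33 bits) with round-to-nearest-even and tininess detected after rounding.
class NarrowRecF64ToRecF32 extends RawModule {
  val io = IO(new Bundle {
    val in  = Input(Bits(65.W))
    val out = Output(Bits(33.W))
    val exc = Output(Bits(5.W))
  })
  val conv = Module(new RecFNToRecFN(11, 53, 8, 24))
  conv.io.in             := io.in
  conv.io.roundingMode   := consts.round_near_even
  conv.io.detectTininess := consts.tininess_afterRounding
  io.out := conv.io.out
  io.exc := conv.io.exceptionFlags
}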
Generate the Verilog code corresponding to the following Chisel files. File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File Nodes.scala: package constellation.channel import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Parameters, Field} import freechips.rocketchip.diplomacy._ case class EmptyParams() case class ChannelEdgeParams(cp: ChannelParams, p: Parameters) object ChannelImp extends SimpleNodeImp[EmptyParams, ChannelParams, ChannelEdgeParams, Channel] { def edge(pd: EmptyParams, pu: ChannelParams, p: Parameters, sourceInfo: SourceInfo) = { ChannelEdgeParams(pu, p) } def bundle(e: ChannelEdgeParams) = new Channel(e.cp)(e.p) def render(e: ChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) { RenderedEdge(colour = "ffffff", label = "X") } else { RenderedEdge(colour = "#0000ff", label = e.cp.payloadBits.toString) } override def monitor(bundle: Channel, edge: ChannelEdgeParams): Unit = { val monitor = Module(new NoCMonitor(edge.cp)(edge.p)) monitor.io.in := bundle } // TODO: Add nodepath stuff? 
override def mixO, override def mixI } case class ChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(ChannelImp)(Seq(EmptyParams())) case class ChannelDestNode(val destParams: ChannelParams)(implicit valName: ValName) extends SinkNode(ChannelImp)(Seq(destParams)) case class ChannelAdapterNode( slaveFn: ChannelParams => ChannelParams = { d => d })( implicit valName: ValName) extends AdapterNode(ChannelImp)((e: EmptyParams) => e, slaveFn) case class ChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(ChannelImp)() case class ChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(ChannelImp)() case class IngressChannelEdgeParams(cp: IngressChannelParams, p: Parameters) case class EgressChannelEdgeParams(cp: EgressChannelParams, p: Parameters) object IngressChannelImp extends SimpleNodeImp[EmptyParams, IngressChannelParams, IngressChannelEdgeParams, IngressChannel] { def edge(pd: EmptyParams, pu: IngressChannelParams, p: Parameters, sourceInfo: SourceInfo) = { IngressChannelEdgeParams(pu, p) } def bundle(e: IngressChannelEdgeParams) = new IngressChannel(e.cp)(e.p) def render(e: IngressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) { RenderedEdge(colour = "ffffff", label = "X") } else { RenderedEdge(colour = "#00ff00", label = e.cp.payloadBits.toString) } } object EgressChannelImp extends SimpleNodeImp[EmptyParams, EgressChannelParams, EgressChannelEdgeParams, EgressChannel] { def edge(pd: EmptyParams, pu: EgressChannelParams, p: Parameters, sourceInfo: SourceInfo) = { EgressChannelEdgeParams(pu, p) } def bundle(e: EgressChannelEdgeParams) = new EgressChannel(e.cp)(e.p) def render(e: EgressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) { RenderedEdge(colour = "ffffff", label = "X") } else { RenderedEdge(colour = "#ff0000", label = e.cp.payloadBits.toString) } } case class IngressChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(IngressChannelImp)(Seq(EmptyParams())) case class IngressChannelDestNode(val destParams: IngressChannelParams)(implicit valName: ValName) extends SinkNode(IngressChannelImp)(Seq(destParams)) case class EgressChannelSourceNode(val egressId: Int)(implicit valName: ValName) extends SourceNode(EgressChannelImp)(Seq(EmptyParams())) case class EgressChannelDestNode(val destParams: EgressChannelParams)(implicit valName: ValName) extends SinkNode(EgressChannelImp)(Seq(destParams)) case class IngressChannelAdapterNode( slaveFn: IngressChannelParams => IngressChannelParams = { d => d })( implicit valName: ValName) extends AdapterNode(IngressChannelImp)(m => m, slaveFn) case class EgressChannelAdapterNode( slaveFn: EgressChannelParams => EgressChannelParams = { d => d })( implicit valName: ValName) extends AdapterNode(EgressChannelImp)(m => m, slaveFn) case class IngressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(IngressChannelImp)() case class EgressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(EgressChannelImp)() case class IngressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(IngressChannelImp)() case class EgressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(EgressChannelImp)() File Router.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy._ import freechips.rocketchip.util._ import constellation.channel._ import constellation.routing.{RoutingRelation} 
import constellation.noc.{HasNoCParams} case class UserRouterParams( // Payload width. Must match payload width on all channels attached to this routing node payloadBits: Int = 64, // Combines SA and ST stages (removes pipeline register) combineSAST: Boolean = false, // Combines RC and VA stages (removes pipeline register) combineRCVA: Boolean = false, // Adds combinational path from SA to VA coupleSAVA: Boolean = false, vcAllocator: VCAllocatorParams => Parameters => VCAllocator = (vP) => (p) => new RotatingSingleVCAllocator(vP)(p) ) case class RouterParams( nodeId: Int, nIngress: Int, nEgress: Int, user: UserRouterParams ) trait HasRouterOutputParams { def outParams: Seq[ChannelParams] def egressParams: Seq[EgressChannelParams] def allOutParams = outParams ++ egressParams def nOutputs = outParams.size def nEgress = egressParams.size def nAllOutputs = allOutParams.size } trait HasRouterInputParams { def inParams: Seq[ChannelParams] def ingressParams: Seq[IngressChannelParams] def allInParams = inParams ++ ingressParams def nInputs = inParams.size def nIngress = ingressParams.size def nAllInputs = allInParams.size } trait HasRouterParams { def routerParams: RouterParams def nodeId = routerParams.nodeId def payloadBits = routerParams.user.payloadBits } class DebugBundle(val nIn: Int) extends Bundle { val va_stall = Vec(nIn, UInt()) val sa_stall = Vec(nIn, UInt()) } class Router( val routerParams: RouterParams, preDiplomaticInParams: Seq[ChannelParams], preDiplomaticIngressParams: Seq[IngressChannelParams], outDests: Seq[Int], egressIds: Seq[Int] )(implicit p: Parameters) extends LazyModule with HasNoCParams with HasRouterParams { val allPreDiplomaticInParams = preDiplomaticInParams ++ preDiplomaticIngressParams val destNodes = preDiplomaticInParams.map(u => ChannelDestNode(u)) val sourceNodes = outDests.map(u => ChannelSourceNode(u)) val ingressNodes = preDiplomaticIngressParams.map(u => IngressChannelDestNode(u)) val egressNodes = egressIds.map(u => EgressChannelSourceNode(u)) val debugNode = BundleBridgeSource(() => new DebugBundle(allPreDiplomaticInParams.size)) val ctrlNode = if (hasCtrl) Some(BundleBridgeSource(() => new RouterCtrlBundle)) else None def inParams = module.inParams def outParams = module.outParams def ingressParams = module.ingressParams def egressParams = module.egressParams lazy val module = new LazyModuleImp(this) with HasRouterInputParams with HasRouterOutputParams { val (io_in, edgesIn) = destNodes.map(_.in(0)).unzip val (io_out, edgesOut) = sourceNodes.map(_.out(0)).unzip val (io_ingress, edgesIngress) = ingressNodes.map(_.in(0)).unzip val (io_egress, edgesEgress) = egressNodes.map(_.out(0)).unzip val io_debug = debugNode.out(0)._1 val inParams = edgesIn.map(_.cp) val outParams = edgesOut.map(_.cp) val ingressParams = edgesIngress.map(_.cp) val egressParams = edgesEgress.map(_.cp) allOutParams.foreach(u => require(u.srcId == nodeId && u.payloadBits == routerParams.user.payloadBits)) allInParams.foreach(u => require(u.destId == nodeId && u.payloadBits == routerParams.user.payloadBits)) require(nIngress == routerParams.nIngress) require(nEgress == routerParams.nEgress) require(nAllInputs >= 1) require(nAllOutputs >= 1) require(nodeId < (1 << nodeIdBits)) val input_units = inParams.zipWithIndex.map { case (u,i) => Module(new InputUnit(u, outParams, egressParams, routerParams.user.combineRCVA, routerParams.user.combineSAST)) .suggestName(s"input_unit_${i}_from_${u.srcId}") } val ingress_units = ingressParams.zipWithIndex.map { case (u,i) => Module(new IngressUnit(i, u, 
outParams, egressParams, routerParams.user.combineRCVA, routerParams.user.combineSAST)) .suggestName(s"ingress_unit_${i+nInputs}_from_${u.ingressId}") } val all_input_units = input_units ++ ingress_units val output_units = outParams.zipWithIndex.map { case (u,i) => Module(new OutputUnit(inParams, ingressParams, u)) .suggestName(s"output_unit_${i}_to_${u.destId}")} val egress_units = egressParams.zipWithIndex.map { case (u,i) => Module(new EgressUnit(routerParams.user.coupleSAVA && all_input_units.size == 1, routerParams.user.combineSAST, inParams, ingressParams, u)) .suggestName(s"egress_unit_${i+nOutputs}_to_${u.egressId}")} val all_output_units = output_units ++ egress_units val switch = Module(new Switch(routerParams, inParams, outParams, ingressParams, egressParams)) val switch_allocator = Module(new SwitchAllocator(routerParams, inParams, outParams, ingressParams, egressParams)) val vc_allocator = Module(routerParams.user.vcAllocator( VCAllocatorParams(routerParams, inParams, outParams, ingressParams, egressParams) )(p)) val route_computer = Module(new RouteComputer(routerParams, inParams, outParams, ingressParams, egressParams)) val fires_count = WireInit(PopCount(vc_allocator.io.req.map(_.fire))) dontTouch(fires_count) (io_in zip input_units ).foreach { case (i,u) => u.io.in <> i } (io_ingress zip ingress_units).foreach { case (i,u) => u.io.in <> i.flit } (output_units zip io_out ).foreach { case (u,o) => o <> u.io.out } (egress_units zip io_egress).foreach { case (u,o) => o.flit <> u.io.out } (route_computer.io.req zip all_input_units).foreach { case (i,u) => i <> u.io.router_req } (all_input_units zip route_computer.io.resp).foreach { case (u,o) => u.io.router_resp <> o } (vc_allocator.io.req zip all_input_units).foreach { case (i,u) => i <> u.io.vcalloc_req } (all_input_units zip vc_allocator.io.resp).foreach { case (u,o) => u.io.vcalloc_resp <> o } (all_output_units zip vc_allocator.io.out_allocs).foreach { case (u,a) => u.io.allocs <> a } (vc_allocator.io.channel_status zip all_output_units).foreach { case (a,u) => a := u.io.channel_status } all_input_units.foreach(in => all_output_units.zipWithIndex.foreach { case (out,outIdx) => in.io.out_credit_available(outIdx) := out.io.credit_available }) (all_input_units zip switch_allocator.io.req).foreach { case (u,r) => r <> u.io.salloc_req } (all_output_units zip switch_allocator.io.credit_alloc).foreach { case (u,a) => u.io.credit_alloc := a } (switch.io.in zip all_input_units).foreach { case (i,u) => i <> u.io.out } (all_output_units zip switch.io.out).foreach { case (u,o) => u.io.in <> o } switch.io.sel := (if (routerParams.user.combineSAST) { switch_allocator.io.switch_sel } else { RegNext(switch_allocator.io.switch_sel) }) if (hasCtrl) { val io_ctrl = ctrlNode.get.out(0)._1 val ctrl = Module(new RouterControlUnit(routerParams, inParams, outParams, ingressParams, egressParams)) io_ctrl <> ctrl.io.ctrl (all_input_units zip ctrl.io.in_block ).foreach { case (l,r) => l.io.block := r } (all_input_units zip ctrl.io.in_fire ).foreach { case (l,r) => r := l.io.out.map(_.valid) } } else { input_units.foreach(_.io.block := false.B) ingress_units.foreach(_.io.block := false.B) } (io_debug.va_stall zip all_input_units.map(_.io.debug.va_stall)).map { case (l,r) => l := r } (io_debug.sa_stall zip all_input_units.map(_.io.debug.sa_stall)).map { case (l,r) => l := r } val debug_tsc = RegInit(0.U(64.W)) debug_tsc := debug_tsc + 1.U val debug_sample = RegInit(0.U(64.W)) debug_sample := debug_sample + 1.U val sample_rate = 
PlusArg("noc_util_sample_rate", width=20) when (debug_sample === sample_rate - 1.U) { debug_sample := 0.U } def sample(fire: Bool, s: String) = { val util_ctr = RegInit(0.U(64.W)) val fired = RegInit(false.B) util_ctr := util_ctr + fire fired := fired || fire when (sample_rate =/= 0.U && debug_sample === sample_rate - 1.U && fired) { val fmtStr = s"nocsample %d $s %d\n" printf(fmtStr, debug_tsc, util_ctr); fired := fire } } destNodes.map(_.in(0)).foreach { case (in, edge) => in.flit.map { f => sample(f.fire, s"${edge.cp.srcId} $nodeId") } } ingressNodes.map(_.in(0)).foreach { case (in, edge) => sample(in.flit.fire, s"i${edge.cp.asInstanceOf[IngressChannelParams].ingressId} $nodeId") } egressNodes.map(_.out(0)).foreach { case (out, edge) => sample(out.flit.fire, s"$nodeId e${edge.cp.asInstanceOf[EgressChannelParams].egressId}") } } } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } }
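As an aside, the following is a minimal, self-contained sketch (hypothetical class and signal names, not part of the sources above) of the LazyModule / LazyModuleImp idiom that Router follows: the Impl body is only elaborated in the second diplomacy phase, and any diplomatic connections that remain unpaired surface as fields of the generated AutoBundle, which is where the auto_* ports in the Verilog below come from.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}

// Hypothetical example: a lazily-elaborated counter wrapped in a LazyModule.
class ExampleLazyCounter(implicit p: Parameters) extends LazyModule {
  lazy val module = new Impl           // not built until .module is first referenced
  class Impl extends LazyModuleImp(this) {
    val io = IO(new Bundle { val count = Output(UInt(8.W)) })
    val ctr = RegInit(0.U(8.W))        // free-running counter inside the Impl
    ctr := ctr + 1.U
    io.count := ctr
  }
}
// Usage: construct it with LazyModule(new ExampleLazyCounter) while the outer LazyModule
// is still being built; its hardware is emitted only when .module is touched inside an Impl.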
module Router_55( // @[Router.scala:89:25] input clock, // @[Router.scala:89:25] input reset, // @[Router.scala:89:25] output [3:0] auto_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25] output [3:0] auto_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25] output [3:0] auto_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25] output [3:0] auto_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25] output [3:0] auto_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25] output [3:0] auto_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_2_flit_0_valid, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] output [72:0] auto_source_nodes_out_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] output [2:0] auto_source_nodes_out_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_source_nodes_out_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] output [2:0] auto_source_nodes_out_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] input [9:0] auto_source_nodes_out_2_credit_return, // @[LazyModuleImp.scala:107:25] input [9:0] auto_source_nodes_out_2_vc_free, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_1_flit_0_valid, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] output [72:0] auto_source_nodes_out_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] output [2:0] auto_source_nodes_out_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_source_nodes_out_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] output [2:0] auto_source_nodes_out_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] input [9:0] auto_source_nodes_out_1_credit_return, // @[LazyModuleImp.scala:107:25] input [9:0] auto_source_nodes_out_1_vc_free, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_0_flit_0_valid, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] output [72:0] auto_source_nodes_out_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] output [2:0] auto_source_nodes_out_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_source_nodes_out_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] output [2:0] 
auto_source_nodes_out_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] input [9:0] auto_source_nodes_out_0_credit_return, // @[LazyModuleImp.scala:107:25] input [9:0] auto_source_nodes_out_0_vc_free, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_2_flit_0_valid, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] input [72:0] auto_dest_nodes_in_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] input [2:0] auto_dest_nodes_in_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_dest_nodes_in_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] input [2:0] auto_dest_nodes_in_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] output [9:0] auto_dest_nodes_in_2_credit_return, // @[LazyModuleImp.scala:107:25] output [9:0] auto_dest_nodes_in_2_vc_free, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_1_flit_0_valid, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] input [72:0] auto_dest_nodes_in_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] input [2:0] auto_dest_nodes_in_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] input [2:0] auto_dest_nodes_in_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] output [9:0] auto_dest_nodes_in_1_credit_return, // @[LazyModuleImp.scala:107:25] output [9:0] auto_dest_nodes_in_1_vc_free, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_0_flit_0_valid, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] input [72:0] auto_dest_nodes_in_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] input [2:0] auto_dest_nodes_in_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] input [2:0] auto_dest_nodes_in_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] output [9:0] auto_dest_nodes_in_0_credit_return, // @[LazyModuleImp.scala:107:25] output [9:0] auto_dest_nodes_in_0_vc_free // @[LazyModuleImp.scala:107:25] ); wire [19:0] _plusarg_reader_out; // 
@[PlusArg.scala:80:11] wire _route_computer_io_resp_2_vc_sel_0_8; // @[Router.scala:136:32] wire _route_computer_io_resp_2_vc_sel_0_9; // @[Router.scala:136:32] wire _route_computer_io_resp_1_vc_sel_0_8; // @[Router.scala:136:32] wire _route_computer_io_resp_1_vc_sel_0_9; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_2_3; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_2_4; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_2_5; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_2_6; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_2_7; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_2_8; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_2_9; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_1_3; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_1_4; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_1_5; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_1_6; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_1_7; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_1_8; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_1_9; // @[Router.scala:136:32] wire _vc_allocator_io_req_2_ready; // @[Router.scala:133:30] wire _vc_allocator_io_req_1_ready; // @[Router.scala:133:30] wire _vc_allocator_io_req_0_ready; // @[Router.scala:133:30] wire _vc_allocator_io_resp_2_vc_sel_0_8; // @[Router.scala:133:30] wire _vc_allocator_io_resp_2_vc_sel_0_9; // @[Router.scala:133:30] wire _vc_allocator_io_resp_1_vc_sel_0_8; // @[Router.scala:133:30] wire _vc_allocator_io_resp_1_vc_sel_0_9; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_2_3; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_2_4; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_2_5; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_2_6; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_2_7; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_2_8; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_2_9; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_1_3; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_1_4; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_1_5; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_1_6; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_1_7; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_1_8; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_1_9; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_2_3_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_2_4_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_2_5_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_2_6_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_2_7_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_2_8_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_2_9_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_1_3_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_1_4_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_1_5_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_1_6_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_1_7_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_1_8_alloc; // 
@[Router.scala:133:30] wire _vc_allocator_io_out_allocs_1_9_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_0_8_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_0_9_alloc; // @[Router.scala:133:30] wire _switch_allocator_io_req_2_0_ready; // @[Router.scala:132:34] wire _switch_allocator_io_req_1_0_ready; // @[Router.scala:132:34] wire _switch_allocator_io_req_0_0_ready; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_2_3_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_2_4_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_2_5_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_2_6_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_2_7_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_2_8_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_2_9_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_1_3_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_1_4_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_1_5_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_1_6_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_1_7_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_1_8_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_1_9_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_0_8_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_0_9_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_2_0_2_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_2_0_1_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_2_0_0_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_1_0_2_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_1_0_1_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_1_0_0_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_0_0_2_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_0_0_1_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_0_0_0_0; // @[Router.scala:132:34] wire _switch_io_out_2_0_valid; // @[Router.scala:131:24] wire _switch_io_out_2_0_bits_head; // @[Router.scala:131:24] wire _switch_io_out_2_0_bits_tail; // @[Router.scala:131:24] wire [72:0] _switch_io_out_2_0_bits_payload; // @[Router.scala:131:24] wire [2:0] _switch_io_out_2_0_bits_flow_vnet_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_2_0_bits_flow_ingress_node; // @[Router.scala:131:24] wire [1:0] _switch_io_out_2_0_bits_flow_ingress_node_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_2_0_bits_flow_egress_node; // @[Router.scala:131:24] wire [2:0] _switch_io_out_2_0_bits_flow_egress_node_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_2_0_bits_virt_channel_id; // @[Router.scala:131:24] wire _switch_io_out_1_0_valid; // @[Router.scala:131:24] wire _switch_io_out_1_0_bits_head; // @[Router.scala:131:24] wire _switch_io_out_1_0_bits_tail; // @[Router.scala:131:24] wire [72:0] _switch_io_out_1_0_bits_payload; // @[Router.scala:131:24] wire [2:0] _switch_io_out_1_0_bits_flow_vnet_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_1_0_bits_flow_ingress_node; // @[Router.scala:131:24] wire [1:0] _switch_io_out_1_0_bits_flow_ingress_node_id; // @[Router.scala:131:24] wire [3:0] 
_switch_io_out_1_0_bits_flow_egress_node; // @[Router.scala:131:24] wire [2:0] _switch_io_out_1_0_bits_flow_egress_node_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_1_0_bits_virt_channel_id; // @[Router.scala:131:24] wire _switch_io_out_0_0_valid; // @[Router.scala:131:24] wire _switch_io_out_0_0_bits_head; // @[Router.scala:131:24] wire _switch_io_out_0_0_bits_tail; // @[Router.scala:131:24] wire [72:0] _switch_io_out_0_0_bits_payload; // @[Router.scala:131:24] wire [2:0] _switch_io_out_0_0_bits_flow_vnet_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_0_0_bits_flow_ingress_node; // @[Router.scala:131:24] wire [1:0] _switch_io_out_0_0_bits_flow_ingress_node_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_0_0_bits_flow_egress_node; // @[Router.scala:131:24] wire [2:0] _switch_io_out_0_0_bits_flow_egress_node_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_0_0_bits_virt_channel_id; // @[Router.scala:131:24] wire _output_unit_2_to_15_io_credit_available_3; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_credit_available_4; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_credit_available_5; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_credit_available_6; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_credit_available_7; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_credit_available_8; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_credit_available_9; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_channel_status_3_occupied; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_channel_status_4_occupied; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_channel_status_5_occupied; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_channel_status_6_occupied; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_channel_status_7_occupied; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_channel_status_8_occupied; // @[Router.scala:122:13] wire _output_unit_2_to_15_io_channel_status_9_occupied; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_credit_available_3; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_credit_available_4; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_credit_available_5; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_credit_available_6; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_credit_available_7; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_credit_available_8; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_credit_available_9; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_channel_status_3_occupied; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_channel_status_4_occupied; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_channel_status_5_occupied; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_channel_status_6_occupied; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_channel_status_7_occupied; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_channel_status_8_occupied; // @[Router.scala:122:13] wire _output_unit_1_to_9_io_channel_status_9_occupied; // @[Router.scala:122:13] wire _output_unit_0_to_0_io_credit_available_8; // @[Router.scala:122:13] wire _output_unit_0_to_0_io_credit_available_9; // @[Router.scala:122:13] wire _output_unit_0_to_0_io_channel_status_8_occupied; // @[Router.scala:122:13] wire _output_unit_0_to_0_io_channel_status_9_occupied; // @[Router.scala:122:13] wire [3:0] _input_unit_2_from_15_io_router_req_bits_src_virt_id; // @[Router.scala:112:13] wire [2:0] 
_input_unit_2_from_15_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13] wire [3:0] _input_unit_2_from_15_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_2_from_15_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13] wire [3:0] _input_unit_2_from_15_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13] wire [2:0] _input_unit_2_from_15_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_vcalloc_req_valid; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_valid; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_2_3; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_2_4; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_2_5; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_2_6; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_2_7; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_2_8; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_2_9; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_1_3; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_1_4; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_1_5; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_1_6; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_1_7; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_1_8; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_1_9; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_salloc_req_0_bits_tail; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_out_0_valid; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_out_0_bits_flit_head; // @[Router.scala:112:13] wire _input_unit_2_from_15_io_out_0_bits_flit_tail; // @[Router.scala:112:13] wire [72:0] _input_unit_2_from_15_io_out_0_bits_flit_payload; // @[Router.scala:112:13] wire [2:0] _input_unit_2_from_15_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13] wire [3:0] _input_unit_2_from_15_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_2_from_15_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13] wire [3:0] 
_input_unit_2_from_15_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13] wire [2:0] _input_unit_2_from_15_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13] wire [3:0] _input_unit_2_from_15_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13] wire [3:0] _input_unit_1_from_9_io_router_req_bits_src_virt_id; // @[Router.scala:112:13] wire [2:0] _input_unit_1_from_9_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13] wire [3:0] _input_unit_1_from_9_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_1_from_9_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13] wire [3:0] _input_unit_1_from_9_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13] wire [2:0] _input_unit_1_from_9_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_vcalloc_req_valid; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_valid; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_3; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_4; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_5; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_6; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_7; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_8; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_9; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_3; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_4; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_5; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_6; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_7; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_8; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_9; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_salloc_req_0_bits_tail; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_out_0_valid; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_out_0_bits_flit_head; // @[Router.scala:112:13] wire _input_unit_1_from_9_io_out_0_bits_flit_tail; // @[Router.scala:112:13] wire [72:0] 
_input_unit_1_from_9_io_out_0_bits_flit_payload; // @[Router.scala:112:13] wire [2:0] _input_unit_1_from_9_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13] wire [3:0] _input_unit_1_from_9_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_1_from_9_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13] wire [3:0] _input_unit_1_from_9_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13] wire [2:0] _input_unit_1_from_9_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13] wire [3:0] _input_unit_1_from_9_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13] wire [3:0] _input_unit_0_from_0_io_router_req_bits_src_virt_id; // @[Router.scala:112:13] wire [2:0] _input_unit_0_from_0_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13] wire [3:0] _input_unit_0_from_0_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_0_from_0_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13] wire [3:0] _input_unit_0_from_0_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13] wire [2:0] _input_unit_0_from_0_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_valid; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_2_3; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_2_4; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_2_5; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_2_6; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_2_7; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_2_8; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_2_9; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_1_3; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_1_4; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_1_5; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_1_6; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_1_7; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_1_8; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_1_9; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_valid; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_2_3; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_2_4; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_2_5; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_2_6; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_2_7; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_2_8; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_2_9; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_1_3; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_1_4; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_1_5; // 
@[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_1_6; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_1_7; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_1_8; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_1_9; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_salloc_req_0_bits_tail; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_out_0_valid; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_out_0_bits_flit_head; // @[Router.scala:112:13] wire _input_unit_0_from_0_io_out_0_bits_flit_tail; // @[Router.scala:112:13] wire [72:0] _input_unit_0_from_0_io_out_0_bits_flit_payload; // @[Router.scala:112:13] wire [2:0] _input_unit_0_from_0_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13] wire [3:0] _input_unit_0_from_0_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_0_from_0_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13] wire [3:0] _input_unit_0_from_0_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13] wire [2:0] _input_unit_0_from_0_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13] wire [3:0] _input_unit_0_from_0_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13] wire [1:0] fires_count = {1'h0, _vc_allocator_io_req_0_ready & _input_unit_0_from_0_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_1_ready & _input_unit_1_from_9_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_2_ready & _input_unit_2_from_15_io_vcalloc_req_valid}; // @[Decoupled.scala:51:35] reg REG_2_0_2_0; // @[Router.scala:178:14] reg REG_2_0_1_0; // @[Router.scala:178:14] reg REG_2_0_0_0; // @[Router.scala:178:14] reg REG_1_0_2_0; // @[Router.scala:178:14] reg REG_1_0_1_0; // @[Router.scala:178:14] reg REG_1_0_0_0; // @[Router.scala:178:14] reg REG_0_0_2_0; // @[Router.scala:178:14] reg REG_0_0_1_0; // @[Router.scala:178:14] reg REG_0_0_0_0; // @[Router.scala:178:14] reg [63:0] debug_tsc; // @[Router.scala:195:28] reg [63:0] debug_sample; // @[Router.scala:197:31] wire _GEN = debug_sample == {44'h0, _plusarg_reader_out - 20'h1}; // @[PlusArg.scala:80:11] reg [63:0] util_ctr; // @[Router.scala:203:29] reg fired; // @[Router.scala:204:26] wire _GEN_0 = (|_plusarg_reader_out) & _GEN; // @[PlusArg.scala:80:11] wire _GEN_1 = _GEN_0 & fired; // @[Router.scala:204:26, :207:{33,71}] reg [63:0] util_ctr_1; // @[Router.scala:203:29] reg fired_1; // @[Router.scala:204:26] wire _GEN_2 = _GEN_0 & fired_1; // @[Router.scala:204:26, :207:{33,71}] reg [63:0] util_ctr_2; // @[Router.scala:203:29] reg fired_2; // @[Router.scala:204:26] wire _GEN_3 = _GEN_0 & fired_2; // @[Router.scala:204:26, :207:{33,71}]
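The trailing debug registers above (debug_tsc, debug_sample, util_ctr_*, fired_*) are produced by the sample helper near the end of Router.scala. Below is a simplified, self-contained sketch of that counting pattern; the module name is hypothetical, and a plain input stands in for the PlusArg("noc_util_sample_rate") value used in the real code.

import chisel3._

// Sketch of the utilization-sampling pattern from Router.scala: count fires,
// and emit one "nocsample" line each time the sample window elapses.
class UtilSampler extends Module {
  val io = IO(new Bundle {
    val fire        = Input(Bool())
    val sample_rate = Input(UInt(20.W))   // stands in for PlusArg("noc_util_sample_rate")
  })
  val debug_tsc    = RegInit(0.U(64.W))   // free-running timestamp counter
  val debug_sample = RegInit(0.U(64.W))   // position within the sample window
  debug_tsc    := debug_tsc + 1.U
  debug_sample := debug_sample + 1.U
  when (debug_sample === io.sample_rate - 1.U) { debug_sample := 0.U }

  val util_ctr = RegInit(0.U(64.W))       // number of fires observed so far
  val fired    = RegInit(false.B)         // has this channel ever fired?
  util_ctr := util_ctr + io.fire
  fired    := fired || io.fire
  when (io.sample_rate =/= 0.U && debug_sample === io.sample_rate - 1.U && fired) {
    printf("nocsample %d example %d\n", debug_tsc, util_ctr)
    fired := io.fire
  }
}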
Generate the Verilog code corresponding to the following Chisel files. File Buffer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.BufferParams class TLBufferNode ( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit valName: ValName) extends TLAdapterNode( clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) }, managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) } ) { override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}" override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none) } class TLBuffer( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters) extends LazyModule { def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace) def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde) def this()(implicit p: Parameters) = this(BufferParams.default) val node = new TLBufferNode(a, b, c, d, e) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def headBundle = node.out.head._2.bundle override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.a <> a(in .a) in .d <> d(out.d) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> b(out.b) out.c <> c(in .c) out.e <> e(in .e) } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLBuffer { def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default) def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde) def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace) def apply( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters): TLNode = { val buffer = LazyModule(new TLBuffer(a, b, c, d, e)) buffer.node } def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = { val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) } name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } } buffers.map(_.node) } def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = { chain(depth, name) .reduceLeftOption(_ :*=* _) .getOrElse(TLNameNode("no_buffer")) } } File Crossing.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg} @deprecated("IntXing does not ensure interrupt source is glitch free. 
Use IntSyncSource and IntSyncSink", "rocket-chip 1.2") class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val intnode = IntAdapterNode() lazy val module = new Impl class Impl extends LazyModuleImp(this) { (intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) => out := SynchronizerShiftReg(in, sync) } } } object IntSyncCrossingSource { def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) = { val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered)) intsource.node } } class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule { val node = IntSyncSourceNode(alreadyRegistered) lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl) class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := AsyncResetReg(Cat(in.reverse)).asBools } } class ImplRegistered extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := in } } } object IntSyncCrossingSink { @deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2") def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(sync) lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := SynchronizerShiftReg(in.sync, sync) } } } object IntSyncAsyncCrossingSink { def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(0) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := in.sync } } } object IntSyncSyncCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncSyncCrossingSink()) intsink.node } } class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(1) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := RegNext(in.sync) } } } object IntSyncRationalCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncRationalCrossingSink()) intsink.node } } File 
ClockDomain.scala: package freechips.rocketchip.prci import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing { def clockBundle: ClockBundle lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { childClock := clockBundle.clock childReset := clockBundle.reset override def provideImplicitClockToLazyChildren = true // these are just for backwards compatibility with external devices // that were manually wiring themselves to the domain's clock/reset input: val clock = IO(Output(chiselTypeOf(clockBundle.clock))) val reset = IO(Output(chiselTypeOf(clockBundle.reset))) clock := clockBundle.clock reset := clockBundle.reset } } abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain { def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name)) val clockNode = ClockSinkNode(Seq(clockSinkParams)) def clockBundle = clockNode.in.head._1 override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString } class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain { def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name)) val clockNode = ClockSourceNode(Seq(clockSourceParams)) def clockBundle = clockNode.out.head._1 override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString } abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing File HasTiles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.subsystem import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.bundlebridge._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.devices.debug.TLDebugModule import freechips.rocketchip.diplomacy.{DisableMonitors, FlipRendering} import freechips.rocketchip.interrupts.{IntXbar, IntSinkNode, IntSinkPortSimple, IntSyncAsyncCrossingSink} import freechips.rocketchip.tile.{MaxHartIdBits, BaseTile, InstantiableTileParams, TileParams, TilePRCIDomain, TraceBundle, PriorityMuxHartIdFromSeq} import freechips.rocketchip.tilelink.TLWidthWidget import freechips.rocketchip.prci.{ClockGroup, BundleBridgeBlockDuringReset, NoCrossing, SynchronousCrossing, CreditedCrossing, RationalCrossing, AsynchronousCrossing} import freechips.rocketchip.rocket.TracedInstruction import freechips.rocketchip.util.TraceCoreInterface import scala.collection.immutable.SortedMap /** Entry point for Config-uring the presence of Tiles */ case class TilesLocated(loc: HierarchicalLocation) extends Field[Seq[CanAttachTile]](Nil) /** List of HierarchicalLocations which might contain a Tile */ case object PossibleTileLocations extends Field[Seq[HierarchicalLocation]](Nil) /** For determining static tile id */ case object NumTiles extends Field[Int](0) /** Whether to add timing-closure registers along the path of the hart id * as it propagates through the subsystem and into the tile. 
* * These are typically only desirable when a dynamically programmable prefix is being combined * with the static hart id via [[freechips.rocketchip.subsystem.HasTiles.tileHartIdNexusNode]]. */ case object InsertTimingClosureRegistersOnHartIds extends Field[Boolean](false) /** Whether per-tile hart ids are going to be driven as inputs into a HasTiles block, * and if so, what their width should be. */ case object HasTilesExternalHartIdWidthKey extends Field[Option[Int]](None) /** Whether per-tile reset vectors are going to be driven as inputs into a HasTiles block. * * Unlike the hart ids, the reset vector width is determined by the sinks within the tiles, * based on the size of the address map visible to the tiles. */ case object HasTilesExternalResetVectorKey extends Field[Boolean](true) /** These are sources of "constants" that are driven into the tile. * * While they are not expected to change dynamically while the tile is executing code, * they may be either tied to a constant value or programmed during boot or reset. * They need to be instantiated before tiles are attached within the subsystem containing them. */ trait HasTileInputConstants { this: LazyModule with Attachable with InstantiatesHierarchicalElements => /** tileHartIdNode is used to collect publishers and subscribers of hartids. */ val tileHartIdNodes: SortedMap[Int, BundleBridgeEphemeralNode[UInt]] = (0 until nTotalTiles).map { i => (i, BundleBridgeEphemeralNode[UInt]()) }.to(SortedMap) /** tileHartIdNexusNode is a BundleBridgeNexus that collects dynamic hart prefixes. * * Each "prefix" input is actually the same full width as the outer hart id; the expected usage * is that each prefix source would set only some non-overlapping portion of the bits to non-zero values. * This node orReduces them, and further combines the reduction with the static ids assigned to each tile, * producing a unique, dynamic hart id for each tile. * * If p(InsertTimingClosureRegistersOnHartIds) is set, the input and output values are registered. * * The output values are [[dontTouch]]'d to prevent constant propagation from pulling the values into * the tiles if they are constant, which would ruin deduplication of tiles that are otherwise homogeneous. */ val tileHartIdNexusNode = LazyModule(new BundleBridgeNexus[UInt]( inputFn = BundleBridgeNexus.orReduction[UInt](registered = p(InsertTimingClosureRegistersOnHartIds)) _, outputFn = (prefix: UInt, n: Int) => Seq.tabulate(n) { i => val y = dontTouch(prefix | totalTileIdList(i).U(p(MaxHartIdBits).W)) // dontTouch to keep constant prop from breaking tile dedup if (p(InsertTimingClosureRegistersOnHartIds)) BundleBridgeNexus.safeRegNext(y) else y }, default = Some(() => 0.U(p(MaxHartIdBits).W)), inputRequiresOutput = true, // guard against this being driven but then ignored in tileHartIdIONodes below shouldBeInlined = false // can't inline something whose output we are dontTouching )).node // TODO: Replace the DebugModuleHartSelFuncs config key with logic to consume the dynamic hart IDs /** tileResetVectorNode is used to collect publishers and subscribers of tile reset vector addresses. */ val tileResetVectorNodes: SortedMap[Int, BundleBridgeEphemeralNode[UInt]] = (0 until nTotalTiles).map { i => (i, BundleBridgeEphemeralNode[UInt]()) }.to(SortedMap) /** tileResetVectorNexusNode is a BundleBridgeNexus that accepts a single reset vector source, and broadcasts it to all tiles.
*/ val tileResetVectorNexusNode = BundleBroadcast[UInt]( inputRequiresOutput = true // guard against this being driven but ignored in tileResetVectorIONodes below ) /** tileHartIdIONodes may generate subsystem IOs, one per tile, allowing the parent to assign unique hart ids. * * Or, if such IOs are not configured to exist, tileHartIdNexusNode is used to supply an id to each tile. */ val tileHartIdIONodes: Seq[BundleBridgeSource[UInt]] = p(HasTilesExternalHartIdWidthKey) match { case Some(w) => (0 until nTotalTiles).map { i => val hartIdSource = BundleBridgeSource(() => UInt(w.W)) tileHartIdNodes(i) := hartIdSource hartIdSource } case None => { (0 until nTotalTiles).map { i => tileHartIdNodes(i) :*= tileHartIdNexusNode } Nil } } /** tileResetVectorIONodes may generate subsystem IOs, one per tile, allowing the parent to assign unique reset vectors. * * Or, if such IOs are not configured to exist, tileResetVectorNexusNode is used to supply a single reset vector to every tile. */ val tileResetVectorIONodes: Seq[BundleBridgeSource[UInt]] = p(HasTilesExternalResetVectorKey) match { case true => (0 until nTotalTiles).map { i => val resetVectorSource = BundleBridgeSource[UInt]() tileResetVectorNodes(i) := resetVectorSource resetVectorSource } case false => { (0 until nTotalTiles).map { i => tileResetVectorNodes(i) :*= tileResetVectorNexusNode } Nil } } } /** These are sinks of notifications that are driven out from the tile. * * They need to be instantiated before tiles are attached to the subsystem containing them. */ trait HasTileNotificationSinks { this: LazyModule => val tileHaltXbarNode = IntXbar() val tileHaltSinkNode = IntSinkNode(IntSinkPortSimple()) tileHaltSinkNode := tileHaltXbarNode val tileWFIXbarNode = IntXbar() val tileWFISinkNode = IntSinkNode(IntSinkPortSimple()) tileWFISinkNode := tileWFIXbarNode val tileCeaseXbarNode = IntXbar() val tileCeaseSinkNode = IntSinkNode(IntSinkPortSimple()) tileCeaseSinkNode := tileCeaseXbarNode } /** Standardized interface by which parameterized tiles can be attached to contexts containing interconnect resources. * * Sub-classes of this trait can optionally override the individual connect functions in order to specialize * their attachment behaviors, but most use cases should be handled simply by changing the implementation * of the injectNode functions in crossingParams. */ trait CanAttachTile { type TileType <: BaseTile type TileContextType <: DefaultHierarchicalElementContextType def tileParams: InstantiableTileParams[TileType] def crossingParams: HierarchicalElementCrossingParamsLike /** Narrow waist through which all tiles are intended to pass while being instantiated.
*/ def instantiate(allTileParams: Seq[TileParams], instantiatedTiles: SortedMap[Int, TilePRCIDomain[_]])(implicit p: Parameters): TilePRCIDomain[TileType] = { val clockSinkParams = tileParams.clockSinkParams.copy(name = Some(tileParams.uniqueName)) val tile_prci_domain = LazyModule(new TilePRCIDomain[TileType](clockSinkParams, crossingParams) { self => val element = self.element_reset_domain { LazyModule(tileParams.instantiate(crossingParams, PriorityMuxHartIdFromSeq(allTileParams))) } }) tile_prci_domain } /** A default set of connections that need to occur for most tile types */ def connect(domain: TilePRCIDomain[TileType], context: TileContextType): Unit = { connectMasterPorts(domain, context) connectSlavePorts(domain, context) connectInterrupts(domain, context) connectPRC(domain, context) connectOutputNotifications(domain, context) connectInputConstants(domain, context) connectTrace(domain, context) } /** Connect the port where the tile is the master to a TileLink interconnect. */ def connectMasterPorts(domain: TilePRCIDomain[TileType], context: Attachable): Unit = { implicit val p = context.p val dataBus = context.locateTLBusWrapper(crossingParams.master.where) dataBus.coupleFrom(tileParams.baseName) { bus => bus :=* crossingParams.master.injectNode(context) :=* domain.crossMasterPort(crossingParams.crossingType) } } /** Connect the port where the tile is the slave to a TileLink interconnect. */ def connectSlavePorts(domain: TilePRCIDomain[TileType], context: Attachable): Unit = { implicit val p = context.p DisableMonitors { implicit p => val controlBus = context.locateTLBusWrapper(crossingParams.slave.where) controlBus.coupleTo(tileParams.baseName) { bus => domain.crossSlavePort(crossingParams.crossingType) :*= crossingParams.slave.injectNode(context) :*= TLWidthWidget(controlBus.beatBytes) :*= bus } } } /** Connect the various interrupts sent to and raised by the tile. */ def connectInterrupts(domain: TilePRCIDomain[TileType], context: TileContextType): Unit = { implicit val p = context.p // NOTE: The order of calls to := matters! They must match how interrupts // are decoded from tile.intInwardNode inside the tile. For this reason, // we stub out missing interrupts with constant sources here. // 1. Debug interrupt is definitely asynchronous in all cases. domain.element.intInwardNode := domain { IntSyncAsyncCrossingSink(3) } := context.debugNodes(domain.element.tileId) // 2. The CLINT and PLIC output interrupts are synchronous to the CLINT/PLIC respectively, // so might need to be synchronized depending on the Tile's crossing type. // From CLINT: "msip" and "mtip" context.msipDomain { domain.crossIntIn(crossingParams.crossingType, domain.element.intInwardNode) := context.msipNodes(domain.element.tileId) } // From PLIC: "meip" context.meipDomain { domain.crossIntIn(crossingParams.crossingType, domain.element.intInwardNode) := context.meipNodes(domain.element.tileId) } // From PLIC: "seip" (only if supervisor mode is enabled) if (domain.element.tileParams.core.hasSupervisorMode) { context.seipDomain { domain.crossIntIn(crossingParams.crossingType, domain.element.intInwardNode) := context.seipNodes(domain.element.tileId) } } // 3. Local Interrupts ("lip") are required to already be synchronous to the Tile's clock. // (they are connected to domain.element.intInwardNode in a separate trait) // 4. Interrupts coming out of the tile are sent to the PLIC, // so might need to be synchronized depending on the Tile's crossing type.
context.tileToPlicNodes.get(domain.element.tileId).foreach { node => FlipRendering { implicit p => domain.element.intOutwardNode.foreach { out => context.toPlicDomain { node := domain.crossIntOut(crossingParams.crossingType, out) } }} } // 5. Connect NMI inputs to the tile. These inputs are synchronous to the respective core_clock. domain.element.nmiNode.foreach(_ := context.nmiNodes(domain.element.tileId)) } /** Notifications of tile status are connected to be broadcast without needing to be clock-crossed. */ def connectOutputNotifications(domain: TilePRCIDomain[TileType], context: TileContextType): Unit = { implicit val p = context.p domain { context.tileHaltXbarNode :=* domain.crossIntOut(NoCrossing, domain.element.haltNode) context.tileWFIXbarNode :=* domain.crossIntOut(NoCrossing, domain.element.wfiNode) context.tileCeaseXbarNode :=* domain.crossIntOut(NoCrossing, domain.element.ceaseNode) } // TODO should context be forced to have a trace sink connected here? // for now this just ensures domain.trace[Core]Node has been crossed without connecting it externally } /** Connect inputs to the tile that are assumed to be constant during normal operation, and so are not clock-crossed. */ def connectInputConstants(domain: TilePRCIDomain[TileType], context: TileContextType): Unit = { implicit val p = context.p val tlBusToGetPrefixFrom = context.locateTLBusWrapper(crossingParams.mmioBaseAddressPrefixWhere) domain.element.hartIdNode := context.tileHartIdNodes(domain.element.tileId) domain.element.resetVectorNode := context.tileResetVectorNodes(domain.element.tileId) tlBusToGetPrefixFrom.prefixNode.foreach { domain.element.mmioAddressPrefixNode := _ } } /** Connect power/reset/clock resources. */ def connectPRC(domain: TilePRCIDomain[TileType], context: TileContextType): Unit = { implicit val p = context.p val tlBusToGetClockDriverFrom = context.locateTLBusWrapper(crossingParams.master.where) (crossingParams.crossingType match { case _: SynchronousCrossing | _: CreditedCrossing => if (crossingParams.forceSeparateClockReset) { domain.clockNode := tlBusToGetClockDriverFrom.clockNode } else { domain.clockNode := tlBusToGetClockDriverFrom.fixedClockNode } case _: RationalCrossing => domain.clockNode := tlBusToGetClockDriverFrom.clockNode case _: AsynchronousCrossing => { val tileClockGroup = ClockGroup() tileClockGroup := context.allClockGroupsNode domain.clockNode := tileClockGroup } }) domain { domain.element_reset_domain.clockNode := crossingParams.resetCrossingType.injectClockNode := domain.clockNode } } /** Function to handle all trace crossings when tile is instantiated inside domains */ def connectTrace(domain: TilePRCIDomain[TileType], context: TileContextType): Unit = { implicit val p = context.p val traceCrossingNode = BundleBridgeBlockDuringReset[TraceBundle]( resetCrossingType = crossingParams.resetCrossingType) context.traceNodes(domain.element.tileId) := traceCrossingNode := domain.element.traceNode val traceCoreCrossingNode = BundleBridgeBlockDuringReset[TraceCoreInterface]( resetCrossingType = crossingParams.resetCrossingType) context.traceCoreNodes(domain.element.tileId) :*= traceCoreCrossingNode := domain.element.traceCoreNode } } case class CloneTileAttachParams( sourceTileId: Int, cloneParams: CanAttachTile ) extends CanAttachTile { type TileType = cloneParams.TileType type TileContextType = cloneParams.TileContextType def tileParams = cloneParams.tileParams def crossingParams = cloneParams.crossingParams override def instantiate(allTileParams: Seq[TileParams], instantiatedTiles: 
SortedMap[Int, TilePRCIDomain[_]])(implicit p: Parameters): TilePRCIDomain[TileType] = { require(instantiatedTiles.contains(sourceTileId)) val clockSinkParams = tileParams.clockSinkParams.copy(name = Some(tileParams.uniqueName)) val tile_prci_domain = CloneLazyModule( new TilePRCIDomain[TileType](clockSinkParams, crossingParams) { self => val element = self.element_reset_domain { LazyModule(tileParams.instantiate(crossingParams, PriorityMuxHartIdFromSeq(allTileParams))) } }, instantiatedTiles(sourceTileId).asInstanceOf[TilePRCIDomain[TileType]] ) tile_prci_domain } } File ClockGroup.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.prci import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.resources.FixedClockResource case class ClockGroupingNode(groupName: String)(implicit valName: ValName) extends MixedNexusNode(ClockGroupImp, ClockImp)( dFn = { _ => ClockSourceParameters() }, uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) }) { override def circuitIdentity = outputs.size == 1 } class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule { val node = ClockGroupingNode(groupName) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in(0) val (out, _) = node.out.unzip require (node.in.size == 1) require (in.member.size == out.size) (in.member.data zip out) foreach { case (i, o) => o := i } } } object ClockGroup { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node } case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName) extends NexusNode(ClockGroupImp)( dFn = { _ => ClockGroupSourceParameters() }, uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))}) { override def circuitIdentity = outputs.size == 1 } class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule { val node = ClockGroupAggregateNode(groupName) override lazy val desiredName = s"ClockGroupAggregator_$groupName" lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in.unzip val (out, _) = node.out.unzip val outputs = out.flatMap(_.member.data) require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1") require (in.head.member.size == outputs.size) in.head.member.data.zip(outputs).foreach { case (i, o) => o := i } } } object ClockGroupAggregator { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node } class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule { val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() }) lazy val module = new Impl class Impl extends LazyModuleImp(this) { val (out, _) = node.out.unzip out.map { out: ClockGroupBundle => out.member.data.foreach { o => o.clock := clock; o.reset := reset } } } } object SimpleClockGroupSource { def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node } case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName) extends NexusNode(ClockImp)( dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = 
fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) }, uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) }, inputRequiresOutput = false) { def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix))) } class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule { val node = new FixedClockBroadcastNode(fixedClockOpt) { override def circuitIdentity = outputs.size == 1 } lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in(0) val (out, _) = node.out.unzip override def desiredName = s"FixedClockBroadcast_${out.size}" require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock") out.foreach { _ := in } } } object FixedClockBroadcast { def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node } case class PRCIClockGroupNode()(implicit valName: ValName) extends NexusNode(ClockGroupImp)( dFn = { _ => ClockGroupSourceParameters() }, uFn = { _ => ClockGroupSinkParameters("prci", Nil) }, outputRequiresInput = false) File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
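// (Dangles whose source HalfEdge occurs twice — i.e. both ends of the binding live inside this LazyModule — are connected to each other below; every remaining Dangle is punched out as a field of the AutoBundle IO and handed up to the parent.)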
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. 
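*
* (Illustrative usage only; `WithNoMonitors` is a hypothetical name. A design can override this key with a Config fragment such as
* {{{
* class WithNoMonitors extends Config((site, here, up) => { case MonitorsEnabled => false })
* }}}
* to suppress protocol monitors on all inward edges.)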
*/ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all nodes are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extend from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extend from [[chisel3.Data]], which represents the real hardware.
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc. operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star" * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N...
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
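// (Worked example with made-up sizes: if the other node's three bindings resolved to 1, 2 and 1 edges, then Seq(1, 2, 1).scanLeft(0)(_ + _) gives Seq(0, 1, 3, 4), so its oPortMapping is Seq((0,1), (1,3), (3,4)); querying binding i = 1 below therefore yields edge indices j = 1 and j = 2.)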
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
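// Illustrative sketch only (not one of the requested files above): a minimal LazyModule exercising
// the MixedNode `in`/`out` accessors documented above, reusing FixedClockBroadcastNode from
// ClockGroup.scala. `ClockFanoutExample` is a hypothetical name.
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyRawModuleImp}
import freechips.rocketchip.prci.FixedClockBroadcastNode

class ClockFanoutExample(implicit p: Parameters) extends LazyModule {
  val node = FixedClockBroadcastNode(None)
  lazy val module = new LazyRawModuleImp(this) {
    // node.in / node.out may only be read once elaboration of this module body has begun
    val (in, _) = node.in(0)
    node.out.foreach { case (out, _) => out := in } // fan the single input clock/reset out to every sink
  }
}
// Typical use: val fanout = LazyModule(new ClockFanoutExample) inside a larger diplomacy graph.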
module TilePRCIDomain_5( // @[ClockDomain.scala:14:9] output auto_intsink_out_1_0, // @[LazyModuleImp.scala:107:25] input auto_intsink_in_sync_0, // @[LazyModuleImp.scala:107:25] output auto_element_reset_domain_rockettile_trace_source_out_insns_0_valid, // @[LazyModuleImp.scala:107:25] output [39:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_iaddr, // @[LazyModuleImp.scala:107:25] output [31:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_insn, // @[LazyModuleImp.scala:107:25] output [2:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_priv, // @[LazyModuleImp.scala:107:25] output auto_element_reset_domain_rockettile_trace_source_out_insns_0_exception, // @[LazyModuleImp.scala:107:25] output auto_element_reset_domain_rockettile_trace_source_out_insns_0_interrupt, // @[LazyModuleImp.scala:107:25] output [63:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_cause, // @[LazyModuleImp.scala:107:25] output [39:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_tval, // @[LazyModuleImp.scala:107:25] output [63:0] auto_element_reset_domain_rockettile_trace_source_out_time, // @[LazyModuleImp.scala:107:25] input [2:0] auto_element_reset_domain_rockettile_hartid_in, // @[LazyModuleImp.scala:107:25] input auto_int_in_clock_xing_in_2_sync_0, // @[LazyModuleImp.scala:107:25] input auto_int_in_clock_xing_in_1_sync_0, // @[LazyModuleImp.scala:107:25] input auto_int_in_clock_xing_in_0_sync_0, // @[LazyModuleImp.scala:107:25] input auto_int_in_clock_xing_in_0_sync_1, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_tl_master_clock_xing_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_tl_master_clock_xing_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_tl_master_clock_xing_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_tl_master_clock_xing_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_tl_master_clock_xing_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_b_ready, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_b_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_out_b_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_tl_master_clock_xing_out_b_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_tl_master_clock_xing_out_b_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_tl_master_clock_xing_out_b_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_tl_master_clock_xing_out_b_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_tl_master_clock_xing_out_b_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_tl_master_clock_xing_out_b_bits_data, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_b_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_c_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_c_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_out_c_bits_opcode, // 
@[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_out_c_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_tl_master_clock_xing_out_c_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_tl_master_clock_xing_out_c_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_tl_master_clock_xing_out_c_bits_address, // @[LazyModuleImp.scala:107:25] output [63:0] auto_tl_master_clock_xing_out_c_bits_data, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_c_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_tl_master_clock_xing_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_tl_master_clock_xing_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_tl_master_clock_xing_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_tl_master_clock_xing_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_e_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_e_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_out_e_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_tap_clock_in_clock, // @[LazyModuleImp.scala:107:25] input auto_tap_clock_in_reset // @[LazyModuleImp.scala:107:25] ); wire clockNode_auto_anon_in_reset; // @[ClockGroup.scala:104:9] wire clockNode_auto_anon_in_clock; // @[ClockGroup.scala:104:9] wire element_reset_domain_auto_clock_in_reset; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_clock_in_clock; // @[ClockDomain.scala:14:9] wire auto_intsink_in_sync_0_0 = auto_intsink_in_sync_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_element_reset_domain_rockettile_hartid_in_0 = auto_element_reset_domain_rockettile_hartid_in; // @[ClockDomain.scala:14:9] wire auto_int_in_clock_xing_in_2_sync_0_0 = auto_int_in_clock_xing_in_2_sync_0; // @[ClockDomain.scala:14:9] wire auto_int_in_clock_xing_in_1_sync_0_0 = auto_int_in_clock_xing_in_1_sync_0; // @[ClockDomain.scala:14:9] wire auto_int_in_clock_xing_in_0_sync_0_0 = auto_int_in_clock_xing_in_0_sync_0; // @[ClockDomain.scala:14:9] wire auto_int_in_clock_xing_in_0_sync_1_0 = auto_int_in_clock_xing_in_0_sync_1; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_a_ready_0 = auto_tl_master_clock_xing_out_a_ready; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_b_valid_0 = auto_tl_master_clock_xing_out_b_valid; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_b_bits_opcode_0 = auto_tl_master_clock_xing_out_b_bits_opcode; // @[ClockDomain.scala:14:9] wire [1:0] auto_tl_master_clock_xing_out_b_bits_param_0 = auto_tl_master_clock_xing_out_b_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] auto_tl_master_clock_xing_out_b_bits_size_0 = auto_tl_master_clock_xing_out_b_bits_size; // @[ClockDomain.scala:14:9] wire [1:0] auto_tl_master_clock_xing_out_b_bits_source_0 = auto_tl_master_clock_xing_out_b_bits_source; // @[ClockDomain.scala:14:9] wire [31:0] 
auto_tl_master_clock_xing_out_b_bits_address_0 = auto_tl_master_clock_xing_out_b_bits_address; // @[ClockDomain.scala:14:9] wire [7:0] auto_tl_master_clock_xing_out_b_bits_mask_0 = auto_tl_master_clock_xing_out_b_bits_mask; // @[ClockDomain.scala:14:9] wire [63:0] auto_tl_master_clock_xing_out_b_bits_data_0 = auto_tl_master_clock_xing_out_b_bits_data; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_b_bits_corrupt_0 = auto_tl_master_clock_xing_out_b_bits_corrupt; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_c_ready_0 = auto_tl_master_clock_xing_out_c_ready; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_d_valid_0 = auto_tl_master_clock_xing_out_d_valid; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_d_bits_opcode_0 = auto_tl_master_clock_xing_out_d_bits_opcode; // @[ClockDomain.scala:14:9] wire [1:0] auto_tl_master_clock_xing_out_d_bits_param_0 = auto_tl_master_clock_xing_out_d_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] auto_tl_master_clock_xing_out_d_bits_size_0 = auto_tl_master_clock_xing_out_d_bits_size; // @[ClockDomain.scala:14:9] wire [1:0] auto_tl_master_clock_xing_out_d_bits_source_0 = auto_tl_master_clock_xing_out_d_bits_source; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_d_bits_sink_0 = auto_tl_master_clock_xing_out_d_bits_sink; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_d_bits_denied_0 = auto_tl_master_clock_xing_out_d_bits_denied; // @[ClockDomain.scala:14:9] wire [63:0] auto_tl_master_clock_xing_out_d_bits_data_0 = auto_tl_master_clock_xing_out_d_bits_data; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_d_bits_corrupt_0 = auto_tl_master_clock_xing_out_d_bits_corrupt; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_e_ready_0 = auto_tl_master_clock_xing_out_e_ready; // @[ClockDomain.scala:14:9] wire auto_tap_clock_in_clock_0 = auto_tap_clock_in_clock; // @[ClockDomain.scala:14:9] wire auto_tap_clock_in_reset_0 = auto_tap_clock_in_reset; // @[ClockDomain.scala:14:9] wire [31:0] auto_element_reset_domain_rockettile_trace_core_source_out_group_0_iaddr = 32'h0; // @[ClockDomain.scala:14:9] wire [31:0] auto_element_reset_domain_rockettile_trace_core_source_out_tval = 32'h0; // @[ClockDomain.scala:14:9] wire [31:0] auto_element_reset_domain_rockettile_trace_core_source_out_cause = 32'h0; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_trace_core_source_out_group_0_iaddr = 32'h0; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_trace_core_source_out_tval = 32'h0; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_trace_core_source_out_cause = 32'h0; // @[ClockDomain.scala:14:9] wire [3:0] auto_element_reset_domain_rockettile_trace_core_source_out_group_0_itype = 4'h0; // @[ClockDomain.scala:14:9] wire [3:0] auto_element_reset_domain_rockettile_trace_core_source_out_priv = 4'h0; // @[ClockDomain.scala:14:9] wire [3:0] element_reset_domain_auto_rockettile_trace_core_source_out_group_0_itype = 4'h0; // @[ClockDomain.scala:14:9] wire [3:0] element_reset_domain_auto_rockettile_trace_core_source_out_priv = 4'h0; // @[ClockDomain.scala:14:9] wire [31:0] auto_element_reset_domain_rockettile_reset_vector_in = 32'h10000; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_reset_vector_in = 32'h10000; // @[ClockDomain.scala:14:9] wire auto_intsink_out_2_0 = 1'h0; // @[ClockDomain.scala:14:9] wire auto_intsink_out_0_0 = 1'h0; 
// @[ClockDomain.scala:14:9] wire auto_element_reset_domain_rockettile_trace_core_source_out_group_0_iretire = 1'h0; // @[ClockDomain.scala:14:9] wire auto_element_reset_domain_rockettile_trace_core_source_out_group_0_ilastsize = 1'h0; // @[ClockDomain.scala:14:9] wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire element_reset_domain_auto_rockettile_buffer_out_a_bits_corrupt = 1'h0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_c_bits_corrupt = 1'h0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_cease_out_0 = 1'h0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_halt_out_0 = 1'h0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_trace_core_source_out_group_0_iretire = 1'h0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_trace_core_source_out_group_0_ilastsize = 1'h0; // @[ClockDomain.scala:14:9] wire element_reset_domain__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire clockNode_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire clockNode_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire clockNode__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire intOutClockXingOut_sync_0 = 1'h0; // @[MixedNode.scala:542:17] wire intOutClockXingIn_sync_0 = 1'h0; // @[MixedNode.scala:551:17] wire intOutClockXingOut_1_sync_0 = 1'h0; // @[MixedNode.scala:542:17] wire intOutClockXingIn_1_sync_0 = 1'h0; // @[MixedNode.scala:551:17] wire intOutClockXingOut_4_sync_0 = 1'h0; // @[MixedNode.scala:542:17] wire intOutClockXingIn_4_sync_0 = 1'h0; // @[MixedNode.scala:551:17] wire intOutClockXingOut_5_sync_0 = 1'h0; // @[MixedNode.scala:542:17] wire intOutClockXingIn_5_sync_0 = 1'h0; // @[MixedNode.scala:551:17] wire element_reset_domain_auto_rockettile_trace_source_out_insns_0_valid; // @[ClockDomain.scala:14:9] wire [39:0] element_reset_domain_auto_rockettile_trace_source_out_insns_0_iaddr; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_trace_source_out_insns_0_insn; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_trace_source_out_insns_0_priv; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_trace_source_out_insns_0_exception; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_trace_source_out_insns_0_interrupt; // @[ClockDomain.scala:14:9] wire [63:0] element_reset_domain_auto_rockettile_trace_source_out_insns_0_cause; // @[ClockDomain.scala:14:9] wire [39:0] element_reset_domain_auto_rockettile_trace_source_out_insns_0_tval; // @[ClockDomain.scala:14:9] wire [63:0] element_reset_domain_auto_rockettile_trace_source_out_time; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_hartid_in = auto_element_reset_domain_rockettile_hartid_in_0; // @[ClockDomain.scala:14:9] wire intInClockXingIn_2_sync_0 = auto_int_in_clock_xing_in_2_sync_0_0; // @[ClockDomain.scala:14:9] wire intInClockXingIn_1_sync_0 = auto_int_in_clock_xing_in_1_sync_0_0; // @[ClockDomain.scala:14:9] wire intInClockXingIn_sync_0 = auto_int_in_clock_xing_in_0_sync_0_0; // @[ClockDomain.scala:14:9] wire intInClockXingIn_sync_1 = auto_int_in_clock_xing_in_0_sync_1_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_a_ready = auto_tl_master_clock_xing_out_a_ready_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] 
tlMasterClockXingOut_a_bits_param; // @[MixedNode.scala:542:17] wire [3:0] tlMasterClockXingOut_a_bits_size; // @[MixedNode.scala:542:17] wire [1:0] tlMasterClockXingOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] tlMasterClockXingOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] tlMasterClockXingOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] tlMasterClockXingOut_a_bits_data; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_b_ready; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_b_valid = auto_tl_master_clock_xing_out_b_valid_0; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingOut_b_bits_opcode = auto_tl_master_clock_xing_out_b_bits_opcode_0; // @[ClockDomain.scala:14:9] wire [1:0] tlMasterClockXingOut_b_bits_param = auto_tl_master_clock_xing_out_b_bits_param_0; // @[ClockDomain.scala:14:9] wire [3:0] tlMasterClockXingOut_b_bits_size = auto_tl_master_clock_xing_out_b_bits_size_0; // @[ClockDomain.scala:14:9] wire [1:0] tlMasterClockXingOut_b_bits_source = auto_tl_master_clock_xing_out_b_bits_source_0; // @[ClockDomain.scala:14:9] wire [31:0] tlMasterClockXingOut_b_bits_address = auto_tl_master_clock_xing_out_b_bits_address_0; // @[ClockDomain.scala:14:9] wire [7:0] tlMasterClockXingOut_b_bits_mask = auto_tl_master_clock_xing_out_b_bits_mask_0; // @[ClockDomain.scala:14:9] wire [63:0] tlMasterClockXingOut_b_bits_data = auto_tl_master_clock_xing_out_b_bits_data_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_b_bits_corrupt = auto_tl_master_clock_xing_out_b_bits_corrupt_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_c_ready = auto_tl_master_clock_xing_out_c_ready_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_c_valid; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingOut_c_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingOut_c_bits_param; // @[MixedNode.scala:542:17] wire [3:0] tlMasterClockXingOut_c_bits_size; // @[MixedNode.scala:542:17] wire [1:0] tlMasterClockXingOut_c_bits_source; // @[MixedNode.scala:542:17] wire [31:0] tlMasterClockXingOut_c_bits_address; // @[MixedNode.scala:542:17] wire [63:0] tlMasterClockXingOut_c_bits_data; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_c_bits_corrupt; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_d_ready; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_d_valid = auto_tl_master_clock_xing_out_d_valid_0; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingOut_d_bits_opcode = auto_tl_master_clock_xing_out_d_bits_opcode_0; // @[ClockDomain.scala:14:9] wire [1:0] tlMasterClockXingOut_d_bits_param = auto_tl_master_clock_xing_out_d_bits_param_0; // @[ClockDomain.scala:14:9] wire [3:0] tlMasterClockXingOut_d_bits_size = auto_tl_master_clock_xing_out_d_bits_size_0; // @[ClockDomain.scala:14:9] wire [1:0] tlMasterClockXingOut_d_bits_source = auto_tl_master_clock_xing_out_d_bits_source_0; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingOut_d_bits_sink = auto_tl_master_clock_xing_out_d_bits_sink_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_d_bits_denied = auto_tl_master_clock_xing_out_d_bits_denied_0; // @[ClockDomain.scala:14:9] wire [63:0] tlMasterClockXingOut_d_bits_data = auto_tl_master_clock_xing_out_d_bits_data_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_d_bits_corrupt = auto_tl_master_clock_xing_out_d_bits_corrupt_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_e_ready = 
auto_tl_master_clock_xing_out_e_ready_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_e_valid; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingOut_e_bits_sink; // @[MixedNode.scala:542:17] wire tapClockNodeIn_clock = auto_tap_clock_in_clock_0; // @[ClockDomain.scala:14:9] wire tapClockNodeIn_reset = auto_tap_clock_in_reset_0; // @[ClockDomain.scala:14:9] wire auto_intsink_out_1_0_0; // @[ClockDomain.scala:14:9] wire auto_element_reset_domain_rockettile_trace_source_out_insns_0_valid_0; // @[ClockDomain.scala:14:9] wire [39:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_iaddr_0; // @[ClockDomain.scala:14:9] wire [31:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_insn_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_priv_0; // @[ClockDomain.scala:14:9] wire auto_element_reset_domain_rockettile_trace_source_out_insns_0_exception_0; // @[ClockDomain.scala:14:9] wire auto_element_reset_domain_rockettile_trace_source_out_insns_0_interrupt_0; // @[ClockDomain.scala:14:9] wire [63:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_cause_0; // @[ClockDomain.scala:14:9] wire [39:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_tval_0; // @[ClockDomain.scala:14:9] wire [63:0] auto_element_reset_domain_rockettile_trace_source_out_time_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_a_bits_param_0; // @[ClockDomain.scala:14:9] wire [3:0] auto_tl_master_clock_xing_out_a_bits_size_0; // @[ClockDomain.scala:14:9] wire [1:0] auto_tl_master_clock_xing_out_a_bits_source_0; // @[ClockDomain.scala:14:9] wire [31:0] auto_tl_master_clock_xing_out_a_bits_address_0; // @[ClockDomain.scala:14:9] wire [7:0] auto_tl_master_clock_xing_out_a_bits_mask_0; // @[ClockDomain.scala:14:9] wire [63:0] auto_tl_master_clock_xing_out_a_bits_data_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_a_valid_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_b_ready_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_c_bits_opcode_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_c_bits_param_0; // @[ClockDomain.scala:14:9] wire [3:0] auto_tl_master_clock_xing_out_c_bits_size_0; // @[ClockDomain.scala:14:9] wire [1:0] auto_tl_master_clock_xing_out_c_bits_source_0; // @[ClockDomain.scala:14:9] wire [31:0] auto_tl_master_clock_xing_out_c_bits_address_0; // @[ClockDomain.scala:14:9] wire [63:0] auto_tl_master_clock_xing_out_c_bits_data_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_c_bits_corrupt_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_c_valid_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_d_ready_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_e_bits_sink_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_e_valid_0; // @[ClockDomain.scala:14:9] wire childClock; // @[LazyModuleImp.scala:155:31] wire childReset; // @[LazyModuleImp.scala:158:31] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_valid_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_valid; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_iaddr_0 = 
element_reset_domain_auto_rockettile_trace_source_out_insns_0_iaddr; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_insn_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_insn; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_priv_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_priv; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_exception_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_exception; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_interrupt_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_interrupt; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_cause_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_cause; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_tval_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_tval; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_time_0 = element_reset_domain_auto_rockettile_trace_source_out_time; // @[ClockDomain.scala:14:9] wire clockNode_auto_anon_out_clock; // @[ClockGroup.scala:104:9] wire element_reset_domain_clockNodeIn_clock = element_reset_domain_auto_clock_in_clock; // @[ClockDomain.scala:14:9] wire clockNode_auto_anon_out_reset; // @[ClockGroup.scala:104:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_opcode; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_size; // @[ClockDomain.scala:14:9] wire [1:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_source; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_address; // @[ClockDomain.scala:14:9] wire [7:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_mask; // @[ClockDomain.scala:14:9] wire [63:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_data; // @[ClockDomain.scala:14:9] wire element_reset_domain_clockNodeIn_reset = element_reset_domain_auto_clock_in_reset; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_a_ready; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_a_valid; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_opcode; // @[ClockDomain.scala:14:9] wire [1:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_size; // @[ClockDomain.scala:14:9] wire [1:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_source; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_address; // @[ClockDomain.scala:14:9] wire [7:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_mask; // @[ClockDomain.scala:14:9] wire [63:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_data; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_b_bits_corrupt; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_b_ready; // @[ClockDomain.scala:14:9] wire 
element_reset_domain_auto_rockettile_buffer_out_b_valid; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_c_bits_opcode; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_c_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] element_reset_domain_auto_rockettile_buffer_out_c_bits_size; // @[ClockDomain.scala:14:9] wire [1:0] element_reset_domain_auto_rockettile_buffer_out_c_bits_source; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_buffer_out_c_bits_address; // @[ClockDomain.scala:14:9] wire [63:0] element_reset_domain_auto_rockettile_buffer_out_c_bits_data; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_c_ready; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_c_valid; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_d_bits_opcode; // @[ClockDomain.scala:14:9] wire [1:0] element_reset_domain_auto_rockettile_buffer_out_d_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] element_reset_domain_auto_rockettile_buffer_out_d_bits_size; // @[ClockDomain.scala:14:9] wire [1:0] element_reset_domain_auto_rockettile_buffer_out_d_bits_source; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_d_bits_sink; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_d_bits_denied; // @[ClockDomain.scala:14:9] wire [63:0] element_reset_domain_auto_rockettile_buffer_out_d_bits_data; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_d_bits_corrupt; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_d_ready; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_d_valid; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_e_bits_sink; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_e_ready; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_e_valid; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_wfi_out_0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_int_local_in_3_0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_int_local_in_2_0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_int_local_in_1_0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_int_local_in_1_1; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_int_local_in_0_0; // @[ClockDomain.scala:14:9] wire element_reset_domain_childClock; // @[LazyModuleImp.scala:155:31] wire element_reset_domain_childReset; // @[LazyModuleImp.scala:158:31] assign element_reset_domain_childClock = element_reset_domain_clockNodeIn_clock; // @[MixedNode.scala:551:17] assign element_reset_domain_childReset = element_reset_domain_clockNodeIn_reset; // @[MixedNode.scala:551:17] wire tapClockNodeOut_clock; // @[MixedNode.scala:542:17] wire clockNode_anonIn_clock = clockNode_auto_anon_in_clock; // @[ClockGroup.scala:104:9] wire tapClockNodeOut_reset; // @[MixedNode.scala:542:17] wire clockNode_anonOut_clock; // @[MixedNode.scala:542:17] wire clockNode_anonIn_reset = clockNode_auto_anon_in_reset; // @[ClockGroup.scala:104:9] assign element_reset_domain_auto_clock_in_clock = clockNode_auto_anon_out_clock; // @[ClockGroup.scala:104:9] wire clockNode_anonOut_reset; // 
@[MixedNode.scala:542:17] assign element_reset_domain_auto_clock_in_reset = clockNode_auto_anon_out_reset; // @[ClockGroup.scala:104:9] assign clockNode_auto_anon_out_clock = clockNode_anonOut_clock; // @[ClockGroup.scala:104:9] assign clockNode_auto_anon_out_reset = clockNode_anonOut_reset; // @[ClockGroup.scala:104:9] assign clockNode_anonOut_clock = clockNode_anonIn_clock; // @[MixedNode.scala:542:17, :551:17] assign clockNode_anonOut_reset = clockNode_anonIn_reset; // @[MixedNode.scala:542:17, :551:17] assign clockNode_auto_anon_in_clock = tapClockNodeOut_clock; // @[ClockGroup.scala:104:9] assign clockNode_auto_anon_in_reset = tapClockNodeOut_reset; // @[ClockGroup.scala:104:9] assign childClock = tapClockNodeIn_clock; // @[MixedNode.scala:551:17] assign tapClockNodeOut_clock = tapClockNodeIn_clock; // @[MixedNode.scala:542:17, :551:17] assign childReset = tapClockNodeIn_reset; // @[MixedNode.scala:551:17] assign tapClockNodeOut_reset = tapClockNodeIn_reset; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_a_ready = tlMasterClockXingOut_a_ready; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_a_valid; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_valid_0 = tlMasterClockXingOut_a_valid; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingIn_a_bits_opcode; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_opcode_0 = tlMasterClockXingOut_a_bits_opcode; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingIn_a_bits_param; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_param_0 = tlMasterClockXingOut_a_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] tlMasterClockXingIn_a_bits_size; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_size_0 = tlMasterClockXingOut_a_bits_size; // @[ClockDomain.scala:14:9] wire [1:0] tlMasterClockXingIn_a_bits_source; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_source_0 = tlMasterClockXingOut_a_bits_source; // @[ClockDomain.scala:14:9] wire [31:0] tlMasterClockXingIn_a_bits_address; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_address_0 = tlMasterClockXingOut_a_bits_address; // @[ClockDomain.scala:14:9] wire [7:0] tlMasterClockXingIn_a_bits_mask; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_mask_0 = tlMasterClockXingOut_a_bits_mask; // @[ClockDomain.scala:14:9] wire [63:0] tlMasterClockXingIn_a_bits_data; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_data_0 = tlMasterClockXingOut_a_bits_data; // @[ClockDomain.scala:14:9] wire tlMasterClockXingIn_a_bits_corrupt; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_corrupt_0 = tlMasterClockXingOut_a_bits_corrupt; // @[ClockDomain.scala:14:9] wire tlMasterClockXingIn_b_ready; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_b_ready_0 = tlMasterClockXingOut_b_ready; // @[ClockDomain.scala:14:9] wire tlMasterClockXingIn_b_valid = tlMasterClockXingOut_b_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingIn_b_bits_opcode = tlMasterClockXingOut_b_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlMasterClockXingIn_b_bits_param = tlMasterClockXingOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlMasterClockXingIn_b_bits_size = tlMasterClockXingOut_b_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlMasterClockXingIn_b_bits_source = tlMasterClockXingOut_b_bits_source; // 
@[MixedNode.scala:542:17, :551:17] wire [31:0] tlMasterClockXingIn_b_bits_address = tlMasterClockXingOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17] wire [7:0] tlMasterClockXingIn_b_bits_mask = tlMasterClockXingOut_b_bits_mask; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlMasterClockXingIn_b_bits_data = tlMasterClockXingOut_b_bits_data; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_b_bits_corrupt = tlMasterClockXingOut_b_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_c_ready = tlMasterClockXingOut_c_ready; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_c_valid; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_valid_0 = tlMasterClockXingOut_c_valid; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingIn_c_bits_opcode; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_opcode_0 = tlMasterClockXingOut_c_bits_opcode; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingIn_c_bits_param; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_param_0 = tlMasterClockXingOut_c_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] tlMasterClockXingIn_c_bits_size; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_size_0 = tlMasterClockXingOut_c_bits_size; // @[ClockDomain.scala:14:9] wire [1:0] tlMasterClockXingIn_c_bits_source; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_source_0 = tlMasterClockXingOut_c_bits_source; // @[ClockDomain.scala:14:9] wire [31:0] tlMasterClockXingIn_c_bits_address; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_address_0 = tlMasterClockXingOut_c_bits_address; // @[ClockDomain.scala:14:9] wire [63:0] tlMasterClockXingIn_c_bits_data; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_data_0 = tlMasterClockXingOut_c_bits_data; // @[ClockDomain.scala:14:9] wire tlMasterClockXingIn_c_bits_corrupt; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_corrupt_0 = tlMasterClockXingOut_c_bits_corrupt; // @[ClockDomain.scala:14:9] wire tlMasterClockXingIn_d_ready; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_d_ready_0 = tlMasterClockXingOut_d_ready; // @[ClockDomain.scala:14:9] wire tlMasterClockXingIn_d_valid = tlMasterClockXingOut_d_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingIn_d_bits_opcode = tlMasterClockXingOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlMasterClockXingIn_d_bits_param = tlMasterClockXingOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlMasterClockXingIn_d_bits_size = tlMasterClockXingOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlMasterClockXingIn_d_bits_source = tlMasterClockXingOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingIn_d_bits_sink = tlMasterClockXingOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_d_bits_denied = tlMasterClockXingOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlMasterClockXingIn_d_bits_data = tlMasterClockXingOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_d_bits_corrupt = tlMasterClockXingOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_e_ready = tlMasterClockXingOut_e_ready; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_e_valid; // @[MixedNode.scala:551:17] assign 
auto_tl_master_clock_xing_out_e_valid_0 = tlMasterClockXingOut_e_valid; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingIn_e_bits_sink; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_e_bits_sink_0 = tlMasterClockXingOut_e_bits_sink; // @[ClockDomain.scala:14:9] assign tlMasterClockXingOut_a_valid = tlMasterClockXingIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_opcode = tlMasterClockXingIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_param = tlMasterClockXingIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_size = tlMasterClockXingIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_source = tlMasterClockXingIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_address = tlMasterClockXingIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_mask = tlMasterClockXingIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_data = tlMasterClockXingIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_corrupt = tlMasterClockXingIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_b_ready = tlMasterClockXingIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_valid = tlMasterClockXingIn_c_valid; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_opcode = tlMasterClockXingIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_param = tlMasterClockXingIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_size = tlMasterClockXingIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_source = tlMasterClockXingIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_address = tlMasterClockXingIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_data = tlMasterClockXingIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_corrupt = tlMasterClockXingIn_c_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_d_ready = tlMasterClockXingIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_e_valid = tlMasterClockXingIn_e_valid; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_e_bits_sink = tlMasterClockXingIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire intInClockXingOut_sync_0; // @[MixedNode.scala:542:17] wire intInClockXingOut_sync_1; // @[MixedNode.scala:542:17] assign intInClockXingOut_sync_0 = intInClockXingIn_sync_0; // @[MixedNode.scala:542:17, :551:17] assign intInClockXingOut_sync_1 = intInClockXingIn_sync_1; // @[MixedNode.scala:542:17, :551:17] wire intInClockXingOut_1_sync_0; // @[MixedNode.scala:542:17] assign intInClockXingOut_1_sync_0 = intInClockXingIn_1_sync_0; // @[MixedNode.scala:542:17, :551:17] wire intInClockXingOut_2_sync_0; // @[MixedNode.scala:542:17] assign intInClockXingOut_2_sync_0 = intInClockXingIn_2_sync_0; // @[MixedNode.scala:542:17, :551:17] wire intOutClockXingIn_2_sync_0; // @[MixedNode.scala:551:17] wire intOutClockXingOut_2_sync_0; // @[MixedNode.scala:542:17] wire intOutClockXingOut_3_sync_0; // @[MixedNode.scala:542:17] assign intOutClockXingOut_2_sync_0 = 
intOutClockXingIn_2_sync_0; // @[MixedNode.scala:542:17, :551:17] wire intOutClockXingIn_3_sync_0; // @[MixedNode.scala:551:17] assign intOutClockXingIn_2_sync_0 = intOutClockXingOut_3_sync_0; // @[MixedNode.scala:542:17, :551:17] assign intOutClockXingOut_3_sync_0 = intOutClockXingIn_3_sync_0; // @[MixedNode.scala:542:17, :551:17] RocketTile_5 element_reset_domain_rockettile ( // @[HasTiles.scala:164:59] .clock (element_reset_domain_childClock), // @[LazyModuleImp.scala:155:31] .reset (element_reset_domain_childReset), // @[LazyModuleImp.scala:158:31] .auto_buffer_out_a_ready (element_reset_domain_auto_rockettile_buffer_out_a_ready), // @[ClockDomain.scala:14:9] .auto_buffer_out_a_valid (element_reset_domain_auto_rockettile_buffer_out_a_valid), .auto_buffer_out_a_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_a_bits_opcode), .auto_buffer_out_a_bits_param (element_reset_domain_auto_rockettile_buffer_out_a_bits_param), .auto_buffer_out_a_bits_size (element_reset_domain_auto_rockettile_buffer_out_a_bits_size), .auto_buffer_out_a_bits_source (element_reset_domain_auto_rockettile_buffer_out_a_bits_source), .auto_buffer_out_a_bits_address (element_reset_domain_auto_rockettile_buffer_out_a_bits_address), .auto_buffer_out_a_bits_mask (element_reset_domain_auto_rockettile_buffer_out_a_bits_mask), .auto_buffer_out_a_bits_data (element_reset_domain_auto_rockettile_buffer_out_a_bits_data), .auto_buffer_out_b_ready (element_reset_domain_auto_rockettile_buffer_out_b_ready), .auto_buffer_out_b_valid (element_reset_domain_auto_rockettile_buffer_out_b_valid), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_b_bits_opcode), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_param (element_reset_domain_auto_rockettile_buffer_out_b_bits_param), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_size (element_reset_domain_auto_rockettile_buffer_out_b_bits_size), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_source (element_reset_domain_auto_rockettile_buffer_out_b_bits_source), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_address (element_reset_domain_auto_rockettile_buffer_out_b_bits_address), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_mask (element_reset_domain_auto_rockettile_buffer_out_b_bits_mask), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_data (element_reset_domain_auto_rockettile_buffer_out_b_bits_data), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_corrupt (element_reset_domain_auto_rockettile_buffer_out_b_bits_corrupt), // @[ClockDomain.scala:14:9] .auto_buffer_out_c_ready (element_reset_domain_auto_rockettile_buffer_out_c_ready), // @[ClockDomain.scala:14:9] .auto_buffer_out_c_valid (element_reset_domain_auto_rockettile_buffer_out_c_valid), .auto_buffer_out_c_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_c_bits_opcode), .auto_buffer_out_c_bits_param (element_reset_domain_auto_rockettile_buffer_out_c_bits_param), .auto_buffer_out_c_bits_size (element_reset_domain_auto_rockettile_buffer_out_c_bits_size), .auto_buffer_out_c_bits_source (element_reset_domain_auto_rockettile_buffer_out_c_bits_source), .auto_buffer_out_c_bits_address (element_reset_domain_auto_rockettile_buffer_out_c_bits_address), .auto_buffer_out_c_bits_data (element_reset_domain_auto_rockettile_buffer_out_c_bits_data), .auto_buffer_out_d_ready (element_reset_domain_auto_rockettile_buffer_out_d_ready), .auto_buffer_out_d_valid (element_reset_domain_auto_rockettile_buffer_out_d_valid), // 
@[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_d_bits_opcode), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_param (element_reset_domain_auto_rockettile_buffer_out_d_bits_param), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_size (element_reset_domain_auto_rockettile_buffer_out_d_bits_size), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_source (element_reset_domain_auto_rockettile_buffer_out_d_bits_source), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_sink (element_reset_domain_auto_rockettile_buffer_out_d_bits_sink), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_denied (element_reset_domain_auto_rockettile_buffer_out_d_bits_denied), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_data (element_reset_domain_auto_rockettile_buffer_out_d_bits_data), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_corrupt (element_reset_domain_auto_rockettile_buffer_out_d_bits_corrupt), // @[ClockDomain.scala:14:9] .auto_buffer_out_e_ready (element_reset_domain_auto_rockettile_buffer_out_e_ready), // @[ClockDomain.scala:14:9] .auto_buffer_out_e_valid (element_reset_domain_auto_rockettile_buffer_out_e_valid), .auto_buffer_out_e_bits_sink (element_reset_domain_auto_rockettile_buffer_out_e_bits_sink), .auto_wfi_out_0 (element_reset_domain_auto_rockettile_wfi_out_0), .auto_int_local_in_3_0 (element_reset_domain_auto_rockettile_int_local_in_3_0), // @[ClockDomain.scala:14:9] .auto_int_local_in_2_0 (element_reset_domain_auto_rockettile_int_local_in_2_0), // @[ClockDomain.scala:14:9] .auto_int_local_in_1_0 (element_reset_domain_auto_rockettile_int_local_in_1_0), // @[ClockDomain.scala:14:9] .auto_int_local_in_1_1 (element_reset_domain_auto_rockettile_int_local_in_1_1), // @[ClockDomain.scala:14:9] .auto_int_local_in_0_0 (element_reset_domain_auto_rockettile_int_local_in_0_0), // @[ClockDomain.scala:14:9] .auto_trace_source_out_insns_0_valid (element_reset_domain_auto_rockettile_trace_source_out_insns_0_valid), .auto_trace_source_out_insns_0_iaddr (element_reset_domain_auto_rockettile_trace_source_out_insns_0_iaddr), .auto_trace_source_out_insns_0_insn (element_reset_domain_auto_rockettile_trace_source_out_insns_0_insn), .auto_trace_source_out_insns_0_priv (element_reset_domain_auto_rockettile_trace_source_out_insns_0_priv), .auto_trace_source_out_insns_0_exception (element_reset_domain_auto_rockettile_trace_source_out_insns_0_exception), .auto_trace_source_out_insns_0_interrupt (element_reset_domain_auto_rockettile_trace_source_out_insns_0_interrupt), .auto_trace_source_out_insns_0_cause (element_reset_domain_auto_rockettile_trace_source_out_insns_0_cause), .auto_trace_source_out_insns_0_tval (element_reset_domain_auto_rockettile_trace_source_out_insns_0_tval), .auto_trace_source_out_time (element_reset_domain_auto_rockettile_trace_source_out_time), .auto_hartid_in (element_reset_domain_auto_rockettile_hartid_in) // @[ClockDomain.scala:14:9] ); // @[HasTiles.scala:164:59] TLBuffer_a32d64s2k3z4c_11 buffer ( // @[Buffer.scala:75:28] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset), // @[LazyModuleImp.scala:158:31] .auto_in_a_ready (element_reset_domain_auto_rockettile_buffer_out_a_ready), .auto_in_a_valid (element_reset_domain_auto_rockettile_buffer_out_a_valid), // @[ClockDomain.scala:14:9] .auto_in_a_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_a_bits_opcode), // @[ClockDomain.scala:14:9] .auto_in_a_bits_param (element_reset_domain_auto_rockettile_buffer_out_a_bits_param), 
// @[ClockDomain.scala:14:9] .auto_in_a_bits_size (element_reset_domain_auto_rockettile_buffer_out_a_bits_size), // @[ClockDomain.scala:14:9] .auto_in_a_bits_source (element_reset_domain_auto_rockettile_buffer_out_a_bits_source), // @[ClockDomain.scala:14:9] .auto_in_a_bits_address (element_reset_domain_auto_rockettile_buffer_out_a_bits_address), // @[ClockDomain.scala:14:9] .auto_in_a_bits_mask (element_reset_domain_auto_rockettile_buffer_out_a_bits_mask), // @[ClockDomain.scala:14:9] .auto_in_a_bits_data (element_reset_domain_auto_rockettile_buffer_out_a_bits_data), // @[ClockDomain.scala:14:9] .auto_in_b_ready (element_reset_domain_auto_rockettile_buffer_out_b_ready), // @[ClockDomain.scala:14:9] .auto_in_b_valid (element_reset_domain_auto_rockettile_buffer_out_b_valid), .auto_in_b_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_b_bits_opcode), .auto_in_b_bits_param (element_reset_domain_auto_rockettile_buffer_out_b_bits_param), .auto_in_b_bits_size (element_reset_domain_auto_rockettile_buffer_out_b_bits_size), .auto_in_b_bits_source (element_reset_domain_auto_rockettile_buffer_out_b_bits_source), .auto_in_b_bits_address (element_reset_domain_auto_rockettile_buffer_out_b_bits_address), .auto_in_b_bits_mask (element_reset_domain_auto_rockettile_buffer_out_b_bits_mask), .auto_in_b_bits_data (element_reset_domain_auto_rockettile_buffer_out_b_bits_data), .auto_in_b_bits_corrupt (element_reset_domain_auto_rockettile_buffer_out_b_bits_corrupt), .auto_in_c_ready (element_reset_domain_auto_rockettile_buffer_out_c_ready), .auto_in_c_valid (element_reset_domain_auto_rockettile_buffer_out_c_valid), // @[ClockDomain.scala:14:9] .auto_in_c_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_c_bits_opcode), // @[ClockDomain.scala:14:9] .auto_in_c_bits_param (element_reset_domain_auto_rockettile_buffer_out_c_bits_param), // @[ClockDomain.scala:14:9] .auto_in_c_bits_size (element_reset_domain_auto_rockettile_buffer_out_c_bits_size), // @[ClockDomain.scala:14:9] .auto_in_c_bits_source (element_reset_domain_auto_rockettile_buffer_out_c_bits_source), // @[ClockDomain.scala:14:9] .auto_in_c_bits_address (element_reset_domain_auto_rockettile_buffer_out_c_bits_address), // @[ClockDomain.scala:14:9] .auto_in_c_bits_data (element_reset_domain_auto_rockettile_buffer_out_c_bits_data), // @[ClockDomain.scala:14:9] .auto_in_d_ready (element_reset_domain_auto_rockettile_buffer_out_d_ready), // @[ClockDomain.scala:14:9] .auto_in_d_valid (element_reset_domain_auto_rockettile_buffer_out_d_valid), .auto_in_d_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_d_bits_opcode), .auto_in_d_bits_param (element_reset_domain_auto_rockettile_buffer_out_d_bits_param), .auto_in_d_bits_size (element_reset_domain_auto_rockettile_buffer_out_d_bits_size), .auto_in_d_bits_source (element_reset_domain_auto_rockettile_buffer_out_d_bits_source), .auto_in_d_bits_sink (element_reset_domain_auto_rockettile_buffer_out_d_bits_sink), .auto_in_d_bits_denied (element_reset_domain_auto_rockettile_buffer_out_d_bits_denied), .auto_in_d_bits_data (element_reset_domain_auto_rockettile_buffer_out_d_bits_data), .auto_in_d_bits_corrupt (element_reset_domain_auto_rockettile_buffer_out_d_bits_corrupt), .auto_in_e_ready (element_reset_domain_auto_rockettile_buffer_out_e_ready), .auto_in_e_valid (element_reset_domain_auto_rockettile_buffer_out_e_valid), // @[ClockDomain.scala:14:9] .auto_in_e_bits_sink (element_reset_domain_auto_rockettile_buffer_out_e_bits_sink), // @[ClockDomain.scala:14:9] .auto_out_a_ready 
(tlMasterClockXingIn_a_ready), // @[MixedNode.scala:551:17] .auto_out_a_valid (tlMasterClockXingIn_a_valid), .auto_out_a_bits_opcode (tlMasterClockXingIn_a_bits_opcode), .auto_out_a_bits_param (tlMasterClockXingIn_a_bits_param), .auto_out_a_bits_size (tlMasterClockXingIn_a_bits_size), .auto_out_a_bits_source (tlMasterClockXingIn_a_bits_source), .auto_out_a_bits_address (tlMasterClockXingIn_a_bits_address), .auto_out_a_bits_mask (tlMasterClockXingIn_a_bits_mask), .auto_out_a_bits_data (tlMasterClockXingIn_a_bits_data), .auto_out_a_bits_corrupt (tlMasterClockXingIn_a_bits_corrupt), .auto_out_b_ready (tlMasterClockXingIn_b_ready), .auto_out_b_valid (tlMasterClockXingIn_b_valid), // @[MixedNode.scala:551:17] .auto_out_b_bits_opcode (tlMasterClockXingIn_b_bits_opcode), // @[MixedNode.scala:551:17] .auto_out_b_bits_param (tlMasterClockXingIn_b_bits_param), // @[MixedNode.scala:551:17] .auto_out_b_bits_size (tlMasterClockXingIn_b_bits_size), // @[MixedNode.scala:551:17] .auto_out_b_bits_source (tlMasterClockXingIn_b_bits_source), // @[MixedNode.scala:551:17] .auto_out_b_bits_address (tlMasterClockXingIn_b_bits_address), // @[MixedNode.scala:551:17] .auto_out_b_bits_mask (tlMasterClockXingIn_b_bits_mask), // @[MixedNode.scala:551:17] .auto_out_b_bits_data (tlMasterClockXingIn_b_bits_data), // @[MixedNode.scala:551:17] .auto_out_b_bits_corrupt (tlMasterClockXingIn_b_bits_corrupt), // @[MixedNode.scala:551:17] .auto_out_c_ready (tlMasterClockXingIn_c_ready), // @[MixedNode.scala:551:17] .auto_out_c_valid (tlMasterClockXingIn_c_valid), .auto_out_c_bits_opcode (tlMasterClockXingIn_c_bits_opcode), .auto_out_c_bits_param (tlMasterClockXingIn_c_bits_param), .auto_out_c_bits_size (tlMasterClockXingIn_c_bits_size), .auto_out_c_bits_source (tlMasterClockXingIn_c_bits_source), .auto_out_c_bits_address (tlMasterClockXingIn_c_bits_address), .auto_out_c_bits_data (tlMasterClockXingIn_c_bits_data), .auto_out_c_bits_corrupt (tlMasterClockXingIn_c_bits_corrupt), .auto_out_d_ready (tlMasterClockXingIn_d_ready), .auto_out_d_valid (tlMasterClockXingIn_d_valid), // @[MixedNode.scala:551:17] .auto_out_d_bits_opcode (tlMasterClockXingIn_d_bits_opcode), // @[MixedNode.scala:551:17] .auto_out_d_bits_param (tlMasterClockXingIn_d_bits_param), // @[MixedNode.scala:551:17] .auto_out_d_bits_size (tlMasterClockXingIn_d_bits_size), // @[MixedNode.scala:551:17] .auto_out_d_bits_source (tlMasterClockXingIn_d_bits_source), // @[MixedNode.scala:551:17] .auto_out_d_bits_sink (tlMasterClockXingIn_d_bits_sink), // @[MixedNode.scala:551:17] .auto_out_d_bits_denied (tlMasterClockXingIn_d_bits_denied), // @[MixedNode.scala:551:17] .auto_out_d_bits_data (tlMasterClockXingIn_d_bits_data), // @[MixedNode.scala:551:17] .auto_out_d_bits_corrupt (tlMasterClockXingIn_d_bits_corrupt), // @[MixedNode.scala:551:17] .auto_out_e_ready (tlMasterClockXingIn_e_ready), // @[MixedNode.scala:551:17] .auto_out_e_valid (tlMasterClockXingIn_e_valid), .auto_out_e_bits_sink (tlMasterClockXingIn_e_bits_sink) ); // @[Buffer.scala:75:28] TLBuffer_16 buffer_1 ( // @[Buffer.scala:75:28] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset) // @[LazyModuleImp.scala:158:31] ); // @[Buffer.scala:75:28] IntSyncAsyncCrossingSink_n1x1_5 intsink ( // @[Crossing.scala:86:29] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset), // @[LazyModuleImp.scala:158:31] .auto_in_sync_0 (auto_intsink_in_sync_0_0), // @[ClockDomain.scala:14:9] .auto_out_0 (element_reset_domain_auto_rockettile_int_local_in_0_0) ); // @[Crossing.scala:86:29] 
IntSyncSyncCrossingSink_n1x2_9 intsink_1 ( // @[Crossing.scala:109:29] .auto_in_sync_0 (intInClockXingOut_sync_0), // @[MixedNode.scala:542:17] .auto_in_sync_1 (intInClockXingOut_sync_1), // @[MixedNode.scala:542:17] .auto_out_0 (element_reset_domain_auto_rockettile_int_local_in_1_0), .auto_out_1 (element_reset_domain_auto_rockettile_int_local_in_1_1) ); // @[Crossing.scala:109:29] IntSyncSyncCrossingSink_n1x1_33 intsink_2 ( // @[Crossing.scala:109:29] .auto_in_sync_0 (intInClockXingOut_1_sync_0), // @[MixedNode.scala:542:17] .auto_out_0 (element_reset_domain_auto_rockettile_int_local_in_2_0) ); // @[Crossing.scala:109:29] IntSyncSyncCrossingSink_n1x1_34 intsink_3 ( // @[Crossing.scala:109:29] .auto_in_sync_0 (intInClockXingOut_2_sync_0), // @[MixedNode.scala:542:17] .auto_out_0 (element_reset_domain_auto_rockettile_int_local_in_3_0) ); // @[Crossing.scala:109:29] IntSyncSyncCrossingSink_n1x1_35 intsink_4 (); // @[Crossing.scala:109:29] IntSyncCrossingSource_n1x1_23 intsource ( // @[Crossing.scala:29:31] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset) // @[LazyModuleImp.scala:158:31] ); // @[Crossing.scala:29:31] IntSyncSyncCrossingSink_n1x1_36 intsink_5 ( // @[Crossing.scala:109:29] .auto_in_sync_0 (intOutClockXingOut_2_sync_0), // @[MixedNode.scala:542:17] .auto_out_0 (auto_intsink_out_1_0_0) ); // @[Crossing.scala:109:29] IntSyncCrossingSource_n1x1_24 intsource_1 ( // @[Crossing.scala:29:31] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset), // @[LazyModuleImp.scala:158:31] .auto_in_0 (element_reset_domain_auto_rockettile_wfi_out_0), // @[ClockDomain.scala:14:9] .auto_out_sync_0 (intOutClockXingIn_3_sync_0) ); // @[Crossing.scala:29:31] IntSyncSyncCrossingSink_n1x1_37 intsink_6 (); // @[Crossing.scala:109:29] IntSyncCrossingSource_n1x1_25 intsource_2 ( // @[Crossing.scala:29:31] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset) // @[LazyModuleImp.scala:158:31] ); // @[Crossing.scala:29:31] assign auto_intsink_out_1_0 = auto_intsink_out_1_0_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_valid = auto_element_reset_domain_rockettile_trace_source_out_insns_0_valid_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_iaddr = auto_element_reset_domain_rockettile_trace_source_out_insns_0_iaddr_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_insn = auto_element_reset_domain_rockettile_trace_source_out_insns_0_insn_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_priv = auto_element_reset_domain_rockettile_trace_source_out_insns_0_priv_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_exception = auto_element_reset_domain_rockettile_trace_source_out_insns_0_exception_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_interrupt = auto_element_reset_domain_rockettile_trace_source_out_insns_0_interrupt_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_cause = auto_element_reset_domain_rockettile_trace_source_out_insns_0_cause_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_tval = auto_element_reset_domain_rockettile_trace_source_out_insns_0_tval_0; // @[ClockDomain.scala:14:9] assign 
auto_element_reset_domain_rockettile_trace_source_out_time = auto_element_reset_domain_rockettile_trace_source_out_time_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_valid = auto_tl_master_clock_xing_out_a_valid_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_opcode = auto_tl_master_clock_xing_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_param = auto_tl_master_clock_xing_out_a_bits_param_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_size = auto_tl_master_clock_xing_out_a_bits_size_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_source = auto_tl_master_clock_xing_out_a_bits_source_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_address = auto_tl_master_clock_xing_out_a_bits_address_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_mask = auto_tl_master_clock_xing_out_a_bits_mask_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_data = auto_tl_master_clock_xing_out_a_bits_data_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_corrupt = auto_tl_master_clock_xing_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_b_ready = auto_tl_master_clock_xing_out_b_ready_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_valid = auto_tl_master_clock_xing_out_c_valid_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_opcode = auto_tl_master_clock_xing_out_c_bits_opcode_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_param = auto_tl_master_clock_xing_out_c_bits_param_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_size = auto_tl_master_clock_xing_out_c_bits_size_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_source = auto_tl_master_clock_xing_out_c_bits_source_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_address = auto_tl_master_clock_xing_out_c_bits_address_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_data = auto_tl_master_clock_xing_out_c_bits_data_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_corrupt = auto_tl_master_clock_xing_out_c_bits_corrupt_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_d_ready = auto_tl_master_clock_xing_out_d_ready_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_e_valid = auto_tl_master_clock_xing_out_e_valid_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_e_bits_sink = auto_tl_master_clock_xing_out_e_bits_sink_0; // @[ClockDomain.scala:14:9] endmodule
Generate the Verilog code corresponding to the following Chisel files. File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. * * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. 
One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. 
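  // Standalone illustrative sketch (would live outside this file; shown only to make the
  // read/write source-ID split described above concrete). Assuming numTlTxns = 4 (a
  // hypothetical value), reads use TileLink sources 0..3 (MSB = 0) and writes use 4..7
  // (MSB = 1), which is why strippedResponseSourceId simply drops the MSB:
  object SourceIdSplitSketch extends App {
    val numTlTxns = 4                                      // assumed value, for illustration only
    def encode(isWrite: Boolean, txnIndex: Int): Int =     // mirrors Cat(isRead/WriteSourceBit, index)
      (if (isWrite) numTlTxns else 0) | txnIndex
    def strip(source: Int): Int = source & (numTlTxns - 1) // mirrors dropping the MSB on D responses
    assert(encode(isWrite = false, txnIndex = 3) == 3)     // read  txn 3 -> source 3
    assert(encode(isWrite = true,  txnIndex = 3) == 7)     // write txn 3 -> source 7
    assert(strip(7) == 3)
  }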
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. 
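  // Standalone illustrative sketch (not part of this module): the expression above,
  // ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds, isolates the lowest cleared bit of
  // 'usedWriteIds' as a one-hot; the same trick reappears as 'freeOH' in ReservableListBuffer
  // below. A plain-Scala model of the computation on small integers:
  object LowestFreeBitSketch extends App {
    // Software stand-in for util.leftOR: smear every set bit toward the MSB within 'width' bits.
    def leftOR(x: Int, width: Int): Int = {
      var acc = x; var s = 1
      while (s < width) { acc |= (acc << s) & ((1 << width) - 1); s <<= 1 }
      acc
    }
    def lowestFreeOH(used: Int, width: Int): Int = {
      val free = ~used & ((1 << width) - 1)
      ~(leftOR(free, width) << 1) & free
    }
    // used = b0101: entries 0 and 2 busy -> lowest free entry is 1 -> one-hot b0010.
    assert(lowestFreeOH(used = 0x5, width = 4) == 0x2)
    // All entries busy -> no free one-hot (matches the canIssueW = !usedWriteIds.andR gating).
    assert(lowestFreeOH(used = 0xF, width = 4) == 0x0)
  }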
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
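  // Illustrative walkthrough of the race described above (hypothetical values): suppose list 2
  // currently holds exactly one entry E whose final beat is accepted by ioDataOut this cycle
  // (isEndOfList && isLastBeat), while ioReserve simultaneously reserves a new entry for list 2.
  // Chaining via next.write(E, freeIndex) would append to a list that is being torn down, so the
  // reservation logic below instead writes head(2) := freeIndex, starting a fresh one-element
  // list; validSet takes precedence over validClr, so list 2 stays marked valid with the new head.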
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
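// For example, with params.numBeats = 4 and ioResponse.bits.count = 2, UIntToOH yields
// 0b0100, so only dataMems(2) sees a write enable for this beat.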
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready }
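A minimal sketch of how a parent module might choose between the two buffer implementations above; the factory name 'ListBuffers.make' is hypothetical and not part of the files being translated:

import chisel3._

// Pick the specialized pass-through buffer when only one entry exists; otherwise use the
// full reordering buffer. Both expose the BaseReservableListBuffer IO, so the caller wires
// ioReserve/ioResponse/ioDataOut identically in either case.
object ListBuffers {
  def make[T <: Data](gen: T, params: ReservableListBufferParameters): BaseReservableListBuffer[T] =
    if (params.numEntries == 1) Module(new PassthroughListBuffer(gen, params))
    else Module(new ReservableListBuffer(gen, params))
}

// Example use inside a parent module: val buf = ListBuffers.make(UInt(8.W), params)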
module dataMems_292( // @[UnsafeAXI4ToTL.scala:365:62] input [4:0] R0_addr, input R0_en, input R0_clk, output [66:0] R0_data, input [4:0] W0_addr, input W0_en, input W0_clk, input [66:0] W0_data ); dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62] .R0_addr (R0_addr), .R0_en (R0_en), .R0_clk (R0_clk), .R0_data (R0_data), .W0_addr (W0_addr), .W0_en (W0_en), .W0_clk (W0_clk), .W0_data (W0_data) ); // @[UnsafeAXI4ToTL.scala:365:62] endmodule
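The 'dataMems_*' wrappers above are what the per-beat SyncReadMem declarations in ReservableListBuffer elaborate to. Below is a self-contained sketch of a single SyncReadMem with one read port and one write port, which the memory lowering typically emits as a wrapper with R0_*/W0_* ports like the module above; the name 'OneReadOneWriteMem' and the 32x67 geometry are assumptions chosen only to match those port widths:

import chisel3._

class OneReadOneWriteMem extends Module {
  val io = IO(new Bundle {
    val raddr = Input(UInt(5.W))
    val ren   = Input(Bool())
    val rdata = Output(UInt(67.W))
    val waddr = Input(UInt(5.W))
    val wen   = Input(Bool())
    val wdata = Input(UInt(67.W))
  })
  // 32 entries x 67 bits, matching the R0_*/W0_* port widths of dataMems_292.
  val mem = SyncReadMem(32, UInt(67.W))
  io.rdata := mem.read(io.raddr, io.ren)         // synchronous read; data is valid the next cycle
  when(io.wen) { mem.write(io.waddr, io.wdata) } // single write port
}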
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
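// Each 'A' opcode below is checked the same way: the master must be able to emit it, the
// addressed slave must be able to accept it, and the source ID, alignment, param, mask, and
// (where applicable) corrupt fields must be legal for that particular opcode.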
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
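// 'inflight_opcodes' and 'inflight_sizes' below are updated with the same "(reg | set) & ~clr"
// idiom as 'inflight' above. For example, with endSourceId = 4, inflight = 0b0001,
// a_set = 0b0100 (a new request from source 2), and d_clr = 0b0001 (the response for source 0),
// the next value is (0b0001 | 0b0100) & ~0b0001 = 0b0100.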
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes)
case class TransferSizes(min: Int, max: Int) {
  def this(x: Int) = this(x, x)

  require (min <= max, s"Min transfer $min > max transfer $max")
  require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
  require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
  require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
  require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")

  def none = min == 0
  def contains(x: Int) = isPow2(x) && min <= x && x <= max
  def containsLg(x: Int) = contains(1 << x)
  def containsLg(x: UInt) =
    if (none) false.B
    else if (min == max) { log2Ceil(min).U === x }
    else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }

  def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)

  def intersect(x: TransferSizes) =
    if (x.max < min || max < x.min) TransferSizes.none
    else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))

  // Not a union, because the result may contain sizes contained by neither term
  // NOT TO BE CONFUSED WITH COVERPOINTS
  def mincover(x: TransferSizes) = {
    if (none) {
      x
    } else if (x.none) {
      this
    } else {
      TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
    }
  }

  override def toString() = "TransferSizes[%d, %d]".format(min, max)
}

object TransferSizes {
  def apply(x: Int) = new TransferSizes(x)
  val none = new TransferSizes(0)

  def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
  def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)

  implicit def asBool(x: TransferSizes) = !x.none
}

// AddressSets specify the address space managed by the manager
// Base is the base address, and mask selects the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
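// --- Editor's illustrative sketch (not part of the upstream Parameters.scala) ---
// A minimal usage example of the base/mask encoding documented above; the object name
// AddressSetUsageSketch is hypothetical. contains(x) tests ((x ^ base) & ~mask) == 0,
// so masked bits are "don't care" while unmasked bits must match the base.
private object AddressSetUsageSketch {
  // base=0x1000, mask=0xf0f => 0x1000-0x100f, 0x1100-0x110f, ... (bits [3:0] and [11:8] ignored)
  val strided = AddressSet(0x1000, 0xf0f)
  require( strided.contains(BigInt(0x1003)))  // (0x1003 ^ 0x1000) & ~0xf0f == 0
  require( strided.contains(BigInt(0x1105)))  // second fragment; all differing bits fall in the mask
  require(!strided.contains(BigInt(0x1010)))  // bit 4 differs and is not masked
  require( strided.alignment == 16)           // each fragment is 16 bytes
  require(!strided.contiguous)                // the fragments do not form one contiguous range
}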
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
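The edge constructors above only build bundle payloads and a legality flag; they do not drive any wires themselves. A minimal usage sketch follows (assumptions: a client-side port `(tl, edgeOut) = clientNode.out(0)` and a manager-side port `(mgr, edgeIn) = managerNode.in(0)` already exist inside a surrounding LazyModuleImp; the address, size, and data values are illustrative only):

// Client side: issue a single-beat, 8-byte Get and always accept the response.
val (getLegal, getBits) = edgeOut.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)
tl.a.valid := getLegal            // only request if some manager can service it
tl.a.bits  := getBits
tl.d.ready := true.B              // unconditionally sink AccessAckData beats

// Manager side: answer every incoming Get beat with AccessAckData.
mgr.a.ready := mgr.d.ready
mgr.d.valid := mgr.a.valid
mgr.d.bits  := edgeIn.AccessAck(mgr.a.bits, 0x12345678.U)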
module TLMonitor_56( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [3:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [27:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10] wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; 
// @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire 
_c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27] wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] 
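  // The port list above exposes only the A and D channels, so every c_* /
  // _c_probe_ack_* / _c_set_* helper that the shared Monitor template keeps
  // for B/C/E traffic is tied off to constant zero in this instance; the
  // constants below continue that tie-off.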
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param 
= 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61] wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28] wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 
64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [27:0] _c_first_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_first_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_first_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_first_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_set_wo_ready_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_set_wo_ready_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_opcodes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_opcodes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_sizes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_sizes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_opcodes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_opcodes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_sizes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_sizes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_probe_ack_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_probe_ack_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_probe_ack_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_probe_ack_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _same_cycle_resp_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _same_cycle_resp_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _same_cycle_resp_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _same_cycle_resp_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _same_cycle_resp_WIRE_4_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _same_cycle_resp_WIRE_5_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40] wire [3:0] _c_set_wo_ready_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] 
_c_opcodes_set_interm_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_interm_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51] wire [3:0] _c_opcodes_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_4_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_5_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [130:0] _c_opcodes_set_T_1 = 131'h0; // @[Monitor.scala:767:54] wire [130:0] _c_sizes_set_T_1 = 131'h0; // @[Monitor.scala:768:52] wire [6:0] _c_opcodes_set_T = 7'h0; // @[Monitor.scala:767:79] wire [6:0] _c_sizes_set_T = 7'h0; // @[Monitor.scala:768:77] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] 
_c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59] wire [15:0] _c_set_wo_ready_T = 16'h1; // @[OneHot.scala:58:35] wire [15:0] _c_set_T = 16'h1; // @[OneHot.scala:58:35] wire [39:0] c_opcodes_set = 40'h0; // @[Monitor.scala:740:34] wire [39:0] c_sizes_set = 40'h0; // @[Monitor.scala:741:34] wire [9:0] c_set = 10'h0; // @[Monitor.scala:738:34] wire [9:0] c_set_wo_ready = 10'h0; // @[Monitor.scala:739:34] wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [3:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_4 = source_ok_uncommonBits < 4'hA; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20] wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // 
@[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [27:0] _is_aligned_T = {22'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 28'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = 
mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [3:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}] wire [3:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_10 = source_ok_uncommonBits_1 < 4'hA; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20] wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31] wire _T_672 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_672; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_672; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // 
@[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [3:0] source; // @[Monitor.scala:390:22] reg [27:0] address; // @[Monitor.scala:391:22] wire _T_745 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_745; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_745; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_745; // @[Decoupled.scala:51:35] wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [3:0] source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [9:0] inflight; // @[Monitor.scala:614:27] reg [39:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [39:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [9:0] a_set; // @[Monitor.scala:626:34] wire [9:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [39:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [39:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [6:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [6:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [6:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [6:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [6:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [6:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [6:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [6:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [6:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [39:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [39:0] _a_opcode_lookup_T_6 = {36'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [39:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[39:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [39:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [39:0] _a_size_lookup_T_6 = {36'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [39:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[39:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [15:0] _GEN_2 = 
16'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [15:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [15:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[9:0] : 10'h0; // @[OneHot.scala:58:35] wire _T_598 = _T_672 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_598 ? _a_set_T[9:0] : 10'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [6:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [6:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [6:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [130:0] _a_opcodes_set_T_1 = {127'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_598 ? _a_opcodes_set_T_1[39:0] : 40'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [130:0] _a_sizes_set_T_1 = {127'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[39:0] : 40'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [9:0] d_clr; // @[Monitor.scala:664:34] wire [9:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [39:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [39:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [15:0] _GEN_5 = 16'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [15:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[9:0] : 10'h0; // @[OneHot.scala:58:35] wire _T_613 = _T_745 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_613 ? _d_clr_T[9:0] : 10'h0; // @[OneHot.scala:58:35] wire [142:0] _d_opcodes_clr_T_5 = 143'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_613 ? 
_d_opcodes_clr_T_5[39:0] : 40'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [142:0] _d_sizes_clr_T_5 = 143'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_613 ? _d_sizes_clr_T_5[39:0] : 40'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [9:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [9:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [9:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [39:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [39:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [39:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [39:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [39:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [39:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [9:0] inflight_1; // @[Monitor.scala:726:35] wire [9:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [39:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [39:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [39:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [39:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [39:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [39:0] _c_opcode_lookup_T_6 = {36'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [39:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[39:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [39:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [39:0] _c_size_lookup_T_6 = {36'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [39:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[39:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [9:0] d_clr_1; // @[Monitor.scala:774:34] wire [9:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [39:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [39:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_716 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_716 & d_release_ack_1 ? _d_clr_wo_ready_T_1[9:0] : 10'h0; // @[OneHot.scala:58:35] wire _T_698 = _T_745 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_698 ? _d_clr_T_1[9:0] : 10'h0; // @[OneHot.scala:58:35] wire [142:0] _d_opcodes_clr_T_11 = 143'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_698 ? _d_opcodes_clr_T_11[39:0] : 40'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [142:0] _d_sizes_clr_T_11 = 143'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_698 ? _d_sizes_clr_T_11[39:0] : 40'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 4'h0; // @[Monitor.scala:36:7, :795:113] wire [9:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [9:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [39:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [39:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [39:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [39:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
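The block above is the tail of the monitor's response-tracking logic (the @[Monitor.scala:...] annotations point back to the TLMonitor source): each accepted A-channel beat sets a one-hot bit, indexed by source ID, in an inflight register and records the request's opcode and size, and the matching D-channel beat clears them again so stray or mismatched responses can be flagged. A minimal Chisel sketch of that set/clear idiom is given below; the module name, port names, and widths are illustrative assumptions and are not taken from the generated code.

import chisel3._
import chisel3.util._

// Hypothetical sketch of one-hot in-flight tracking keyed by source ID,
// mirroring the roles of the a_set / d_clr / inflight nets above.
class InflightTracker(nSources: Int) extends Module {
  val io = IO(new Bundle {
    val a_fire   = Input(Bool())                        // request beat accepted
    val a_source = Input(UInt(log2Ceil(nSources).W))
    val d_fire   = Input(Bool())                        // response beat accepted
    val d_source = Input(UInt(log2Ceil(nSources).W))
    val busy     = Output(UInt(nSources.W))             // one bit per outstanding source
  })
  val inflight = RegInit(0.U(nSources.W))
  val a_set = Mux(io.a_fire, UIntToOH(io.a_source, nSources), 0.U)  // set on request
  val d_clr = Mux(io.d_fire, UIntToOH(io.d_source, nSources), 0.U)  // clear on response
  inflight := (inflight | a_set) & ~d_clr
  io.busy := inflight
  // A response must target a source that is (or is just becoming) in flight,
  // loosely analogous to the monitor's same-cycle-response checks.
  assert(!io.d_fire || (inflight | a_set)(io.d_source), "response for idle source")
}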
Generate the Verilog code corresponding to the following Chisel files. File Buffer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.BufferParams class TLBufferNode ( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit valName: ValName) extends TLAdapterNode( clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) }, managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) } ) { override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}" override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none) } class TLBuffer( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters) extends LazyModule { def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace) def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde) def this()(implicit p: Parameters) = this(BufferParams.default) val node = new TLBufferNode(a, b, c, d, e) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def headBundle = node.out.head._2.bundle override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.a <> a(in .a) in .d <> d(out.d) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> b(out.b) out.c <> c(in .c) out.e <> e(in .e) } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLBuffer { def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default) def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde) def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace) def apply( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters): TLNode = { val buffer = LazyModule(new TLBuffer(a, b, c, d, e)) buffer.node } def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = { val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) } name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } } buffers.map(_.node) } def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = { chain(depth, name) .reduceLeftOption(_ :*=* _) .getOrElse(TLNameNode("no_buffer")) } } File Fragmenter.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressSet, BufferParams, IdRange, TransferSizes} import freechips.rocketchip.util.{Repeater, OH1ToUInt, UIntToOH1} import scala.math.min import freechips.rocketchip.util.DataToAugmentedData object EarlyAck { sealed trait T case object AllPuts extends T case object PutFulls extends T case object None extends T } // minSize: minimum size of transfers supported by all outward managers // maxSize: maximum size of transfers supported after the Fragmenter is applied // alwaysMin: fragment all requests down to minSize (else fragment to maximum supported by manager) // earlyAck: should a multibeat Put should be acknowledged on the first beat or last beat // holdFirstDeny: allow the Fragmenter to unsafely combine multibeat Gets by taking the first denied for the whole burst // nameSuffix: appends a suffix to the module name // Fragmenter modifies: PutFull, PutPartial, LogicalData, Get, Hint // Fragmenter passes: ArithmeticData (truncated to minSize if alwaysMin) // Fragmenter cannot modify acquire (could livelock); thus it is unsafe to put caches on both sides class TLFragmenter(val minSize: Int, val maxSize: Int, val alwaysMin: Boolean = false, val earlyAck: EarlyAck.T = EarlyAck.None, val holdFirstDeny: Boolean = false, val nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule { require(isPow2 (maxSize), s"TLFragmenter expects pow2(maxSize), but got $maxSize") require(isPow2 (minSize), s"TLFragmenter expects pow2(minSize), but got $minSize") require(minSize <= maxSize, s"TLFragmenter expects min <= max, but got $minSize > $maxSize") val fragmentBits = log2Ceil(maxSize / minSize) val fullBits = if (earlyAck == EarlyAck.PutFulls) 1 else 0 val toggleBits = 1 val addedBits = fragmentBits + toggleBits + fullBits def expandTransfer(x: TransferSizes, op: String) = if (!x) x else { // validate that we can apply the fragmenter correctly require (x.max >= minSize, s"TLFragmenter (with parent $parent) max transfer size $op(${x.max}) must be >= min transfer size (${minSize})") TransferSizes(x.min, maxSize) } private def noChangeRequired = minSize == maxSize private def shrinkTransfer(x: TransferSizes) = if (!alwaysMin) x else if (x.min <= minSize) TransferSizes(x.min, min(minSize, x.max)) else TransferSizes.none private def mapManager(m: TLSlaveParameters) = m.v1copy( supportsArithmetic = shrinkTransfer(m.supportsArithmetic), supportsLogical = shrinkTransfer(m.supportsLogical), supportsGet = expandTransfer(m.supportsGet, "Get"), supportsPutFull = expandTransfer(m.supportsPutFull, "PutFull"), supportsPutPartial = expandTransfer(m.supportsPutPartial, "PutParital"), supportsHint = expandTransfer(m.supportsHint, "Hint")) val node = new TLAdapterNode( // We require that all the responses are mutually FIFO // Thus we need to compact all of the masters into one big master clientFn = { c => (if (noChangeRequired) c else c.v2copy( masters = Seq(TLMasterParameters.v2( name = "TLFragmenter", sourceId = IdRange(0, if (minSize == maxSize) c.endSourceId else (c.endSourceId << addedBits)), requestFifo = true, emits = TLMasterToSlaveTransferSizes( acquireT = shrinkTransfer(c.masters.map(_.emits.acquireT) .reduce(_ mincover _)), acquireB = shrinkTransfer(c.masters.map(_.emits.acquireB) .reduce(_ mincover _)), arithmetic = 
shrinkTransfer(c.masters.map(_.emits.arithmetic).reduce(_ mincover _)), logical = shrinkTransfer(c.masters.map(_.emits.logical) .reduce(_ mincover _)), get = shrinkTransfer(c.masters.map(_.emits.get) .reduce(_ mincover _)), putFull = shrinkTransfer(c.masters.map(_.emits.putFull) .reduce(_ mincover _)), putPartial = shrinkTransfer(c.masters.map(_.emits.putPartial).reduce(_ mincover _)), hint = shrinkTransfer(c.masters.map(_.emits.hint) .reduce(_ mincover _)) ) )) ))}, managerFn = { m => if (noChangeRequired) m else m.v2copy(slaves = m.slaves.map(mapManager)) } ) { override def circuitIdentity = noChangeRequired } lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = (Seq("TLFragmenter") ++ nameSuffix).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => if (noChangeRequired) { out <> in } else { // All managers must share a common FIFO domain (responses might end up interleaved) val manager = edgeOut.manager val managers = manager.managers val beatBytes = manager.beatBytes val fifoId = managers(0).fifoId require (fifoId.isDefined && managers.map(_.fifoId == fifoId).reduce(_ && _)) require (!manager.anySupportAcquireB || !edgeOut.client.anySupportProbe, s"TLFragmenter (with parent $parent) can't fragment a caching client's requests into a cacheable region") require (minSize >= beatBytes, s"TLFragmenter (with parent $parent) can't support fragmenting ($minSize) to sub-beat ($beatBytes) accesses") // We can't support devices which are cached on both sides of us require (!edgeOut.manager.anySupportAcquireB || !edgeIn.client.anySupportProbe) // We can't support denied because we reassemble fragments require (!edgeOut.manager.mayDenyGet || holdFirstDeny, s"TLFragmenter (with parent $parent) can't support denials without holdFirstDeny=true") require (!edgeOut.manager.mayDenyPut || earlyAck == EarlyAck.None) /* The Fragmenter is a bit tricky, because there are 5 sizes in play: * max size -- the maximum transfer size possible * orig size -- the original pre-fragmenter size * frag size -- the modified post-fragmenter size * min size -- the threshold below which frag=orig * beat size -- the amount transfered on any given beat * * The relationships are as follows: * max >= orig >= frag * max > min >= beat * It IS possible that orig <= min (then frag=orig; ie: no fragmentation) * * The fragment# (sent via TL.source) is measured in multiples of min size. * Meanwhile, to track the progress, counters measure in multiples of beat size. * * Here is an example of a bus with max=256, min=8, beat=4 and a device supporting 16. 
* * in.A out.A (frag#) out.D (frag#) in.D gen# ack# * get64 get16 6 ackD16 6 ackD64 12 15 * ackD16 6 ackD64 14 * ackD16 6 ackD64 13 * ackD16 6 ackD64 12 * get16 4 ackD16 4 ackD64 8 11 * ackD16 4 ackD64 10 * ackD16 4 ackD64 9 * ackD16 4 ackD64 8 * get16 2 ackD16 2 ackD64 4 7 * ackD16 2 ackD64 6 * ackD16 2 ackD64 5 * ackD16 2 ackD64 4 * get16 0 ackD16 0 ackD64 0 3 * ackD16 0 ackD64 2 * ackD16 0 ackD64 1 * ackD16 0 ackD64 0 * * get8 get8 0 ackD8 0 ackD8 0 1 * ackD8 0 ackD8 0 * * get4 get4 0 ackD4 0 ackD4 0 0 * get1 get1 0 ackD1 0 ackD1 0 0 * * put64 put16 6 15 * put64 put16 6 14 * put64 put16 6 13 * put64 put16 6 ack16 6 12 12 * put64 put16 4 11 * put64 put16 4 10 * put64 put16 4 9 * put64 put16 4 ack16 4 8 8 * put64 put16 2 7 * put64 put16 2 6 * put64 put16 2 5 * put64 put16 2 ack16 2 4 4 * put64 put16 0 3 * put64 put16 0 2 * put64 put16 0 1 * put64 put16 0 ack16 0 ack64 0 0 * * put8 put8 0 1 * put8 put8 0 ack8 0 ack8 0 0 * * put4 put4 0 ack4 0 ack4 0 0 * put1 put1 0 ack1 0 ack1 0 0 */ val counterBits = log2Up(maxSize/beatBytes) val maxDownSize = if (alwaysMin) minSize else min(manager.maxTransfer, maxSize) // Consider the following waveform for two 4-beat bursts: // ---A----A------------ // -------D-----DDD-DDDD // Under TL rules, the second A can use the same source as the first A, // because the source is released for reuse on the first response beat. // // However, if we fragment the requests, it looks like this: // ---3210-3210--------- // -------3-----210-3210 // ... now we've broken the rules because 210 are twice inflight. // // This phenomenon means we can have essentially 2*maxSize/minSize-1 // fragmented transactions in flight per original transaction source. // // To keep the source unique, we encode the beat counter in the low // bits of the source. To solve the overlap, we use a toggle bit. // Whatever toggle bit the D is reassembling, A will use the opposite. 
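      // Added illustrative note (not in the original source): a worked example of the
      // source encoding described above, assuming minSize=16, maxSize=256 and
      // earlyAck != PutFulls, so fragmentBits = log2Ceil(256/16) = 4, fullBits = 0,
      // and addedBits = 4 + 1 = 5.  A fragment of an original request from source 3,
      // with toggle t and fragment number 2 (measured in multiples of minSize), goes out as
      //   out.a.bits.source = Cat(3.U, t, 2.U(4.W))     // == (3 << 5) | (t << 4) | 2
      // and on the way back the fragment number and toggle are stripped off again:
      //   dFragnum = out.d.bits.source(3, 0)            // the low fragmentBits bits
      //   dToggle  = out.d.bits.source(4)               // the toggle bit
      //   in.d.bits.source = out.d.bits.source >> 5     // >> addedBits: original id 3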
// First, handle the return path val acknum = RegInit(0.U(counterBits.W)) val dOrig = Reg(UInt()) val dToggle = RegInit(false.B) val dFragnum = out.d.bits.source(fragmentBits-1, 0) val dFirst = acknum === 0.U val dLast = dFragnum === 0.U // only for AccessAck (!Data) val dsizeOH = UIntToOH (out.d.bits.size, log2Ceil(maxDownSize)+1) val dsizeOH1 = UIntToOH1(out.d.bits.size, log2Up(maxDownSize)) val dHasData = edgeOut.hasData(out.d.bits) // calculate new acknum val acknum_fragment = dFragnum << log2Ceil(minSize/beatBytes) val acknum_size = dsizeOH1 >> log2Ceil(beatBytes) assert (!out.d.valid || (acknum_fragment & acknum_size) === 0.U) val dFirst_acknum = acknum_fragment | Mux(dHasData, acknum_size, 0.U) val ack_decrement = Mux(dHasData, 1.U, dsizeOH >> log2Ceil(beatBytes)) // calculate the original size val dFirst_size = OH1ToUInt((dFragnum << log2Ceil(minSize)) | dsizeOH1) when (out.d.fire) { acknum := Mux(dFirst, dFirst_acknum, acknum - ack_decrement) when (dFirst) { dOrig := dFirst_size dToggle := out.d.bits.source(fragmentBits) } } // Swallow up non-data ack fragments val doEarlyAck = earlyAck match { case EarlyAck.AllPuts => true.B case EarlyAck.PutFulls => out.d.bits.source(fragmentBits+1) case EarlyAck.None => false.B } val drop = !dHasData && !Mux(doEarlyAck, dFirst, dLast) out.d.ready := in.d.ready || drop in.d.valid := out.d.valid && !drop in.d.bits := out.d.bits // pass most stuff unchanged in.d.bits.source := out.d.bits.source >> addedBits in.d.bits.size := Mux(dFirst, dFirst_size, dOrig) if (edgeOut.manager.mayDenyPut) { val r_denied = Reg(Bool()) val d_denied = (!dFirst && r_denied) || out.d.bits.denied when (out.d.fire) { r_denied := d_denied } in.d.bits.denied := d_denied } if (edgeOut.manager.mayDenyGet) { // Take denied only from the first beat and hold that value val d_denied = out.d.bits.denied holdUnless dFirst when (dHasData) { in.d.bits.denied := d_denied in.d.bits.corrupt := d_denied || out.d.bits.corrupt } } // What maximum transfer sizes do downstream devices support? 
val maxArithmetics = managers.map(_.supportsArithmetic.max) val maxLogicals = managers.map(_.supportsLogical.max) val maxGets = managers.map(_.supportsGet.max) val maxPutFulls = managers.map(_.supportsPutFull.max) val maxPutPartials = managers.map(_.supportsPutPartial.max) val maxHints = managers.map(m => if (m.supportsHint) maxDownSize else 0) // We assume that the request is valid => size 0 is impossible val lgMinSize = log2Ceil(minSize).U val maxLgArithmetics = maxArithmetics.map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgLogicals = maxLogicals .map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgGets = maxGets .map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgPutFulls = maxPutFulls .map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgPutPartials = maxPutPartials.map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgHints = maxHints .map(m => if (m == 0) lgMinSize else log2Ceil(m).U) // Make the request repeatable val repeater = Module(new Repeater(in.a.bits)) repeater.io.enq <> in.a val in_a = repeater.io.deq // If this is infront of a single manager, these become constants val find = manager.findFast(edgeIn.address(in_a.bits)) val maxLgArithmetic = Mux1H(find, maxLgArithmetics) val maxLgLogical = Mux1H(find, maxLgLogicals) val maxLgGet = Mux1H(find, maxLgGets) val maxLgPutFull = Mux1H(find, maxLgPutFulls) val maxLgPutPartial = Mux1H(find, maxLgPutPartials) val maxLgHint = Mux1H(find, maxLgHints) val limit = if (alwaysMin) lgMinSize else MuxLookup(in_a.bits.opcode, lgMinSize)(Array( TLMessages.PutFullData -> maxLgPutFull, TLMessages.PutPartialData -> maxLgPutPartial, TLMessages.ArithmeticData -> maxLgArithmetic, TLMessages.LogicalData -> maxLgLogical, TLMessages.Get -> maxLgGet, TLMessages.Hint -> maxLgHint)) val aOrig = in_a.bits.size val aFrag = Mux(aOrig > limit, limit, aOrig) val aOrigOH1 = UIntToOH1(aOrig, log2Ceil(maxSize)) val aFragOH1 = UIntToOH1(aFrag, log2Up(maxDownSize)) val aHasData = edgeIn.hasData(in_a.bits) val aMask = Mux(aHasData, 0.U, aFragOH1) val gennum = RegInit(0.U(counterBits.W)) val aFirst = gennum === 0.U val old_gennum1 = Mux(aFirst, aOrigOH1 >> log2Ceil(beatBytes), gennum - 1.U) val new_gennum = ~(~old_gennum1 | (aMask >> log2Ceil(beatBytes))) // ~(~x|y) is width safe val aFragnum = ~(~(old_gennum1 >> log2Ceil(minSize/beatBytes)) | (aFragOH1 >> log2Ceil(minSize))) val aLast = aFragnum === 0.U val aToggle = !Mux(aFirst, dToggle, RegEnable(dToggle, aFirst)) val aFull = if (earlyAck == EarlyAck.PutFulls) Some(in_a.bits.opcode === TLMessages.PutFullData) else None when (out.a.fire) { gennum := new_gennum } repeater.io.repeat := !aHasData && aFragnum =/= 0.U out.a <> in_a out.a.bits.address := in_a.bits.address | ~(old_gennum1 << log2Ceil(beatBytes) | ~aOrigOH1 | aFragOH1 | (minSize-1).U) out.a.bits.source := Cat(Seq(in_a.bits.source) ++ aFull ++ Seq(aToggle.asUInt, aFragnum)) out.a.bits.size := aFrag // Optimize away some of the Repeater's registers assert (!repeater.io.full || !aHasData) out.a.bits.data := in.a.bits.data val fullMask = ((BigInt(1) << beatBytes) - 1).U assert (!repeater.io.full || in_a.bits.mask === fullMask) out.a.bits.mask := Mux(repeater.io.full, fullMask, in.a.bits.mask) out.a.bits.user.waiveAll :<= in.a.bits.user.subset(_.isData) // Tie off unused channels in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLFragmenter { def apply(minSize: Int, maxSize: Int, alwaysMin: Boolean = false, earlyAck: 
EarlyAck.T = EarlyAck.None, holdFirstDeny: Boolean = false, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode = { if (minSize <= maxSize) { val fragmenter = LazyModule(new TLFragmenter(minSize, maxSize, alwaysMin, earlyAck, holdFirstDeny, nameSuffix)) fragmenter.node } else { TLEphemeralNode()(ValName("no_fragmenter")) } } def apply(wrapper: TLBusWrapper, nameSuffix: Option[String])(implicit p: Parameters): TLNode = apply(wrapper.beatBytes, wrapper.blockBytes, nameSuffix = nameSuffix) def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper, None) } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMFragmenter(ramBeatBytes: Int, maxSize: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val fuzz = LazyModule(new TLFuzzer(txns)) val model = LazyModule(new TLRAMModel("Fragmenter")) val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff), beatBytes = ramBeatBytes)) (ram.node := TLDelayer(0.1) := TLBuffer(BufferParams.flow) := TLDelayer(0.1) := TLFragmenter(ramBeatBytes, maxSize, earlyAck = EarlyAck.AllPuts) := TLDelayer(0.1) := TLBuffer(BufferParams.flow) := TLFragmenter(ramBeatBytes, maxSize/2) := TLDelayer(0.1) := TLBuffer(BufferParams.flow) := model.node := fuzz.node) lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished } } class TLRAMFragmenterTest(ramBeatBytes: Int, maxSize: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLRAMFragmenter(ramBeatBytes,maxSize,txns)).module) io.finished := dut.io.finished dut.io.start := io.start } File LazyRoCC.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tile import chisel3._ import chisel3.util._ import chisel3.experimental.IntParam import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.rocket.{ MStatus, HellaCacheIO, TLBPTWIO, CanHavePTW, CanHavePTWModule, SimpleHellaCacheIF, M_XRD, PTE, PRV, M_SZ } import freechips.rocketchip.tilelink.{ TLNode, TLIdentityNode, TLClientNode, TLMasterParameters, TLMasterPortParameters } import freechips.rocketchip.util.InOrderArbiter case object BuildRoCC extends Field[Seq[Parameters => LazyRoCC]](Nil) class RoCCInstruction extends Bundle { val funct = Bits(7.W) val rs2 = Bits(5.W) val rs1 = Bits(5.W) val xd = Bool() val xs1 = Bool() val xs2 = Bool() val rd = Bits(5.W) val opcode = Bits(7.W) } class RoCCCommand(implicit p: Parameters) extends CoreBundle()(p) { val inst = new RoCCInstruction val rs1 = Bits(xLen.W) val rs2 = Bits(xLen.W) val status = new MStatus } class RoCCResponse(implicit p: Parameters) extends CoreBundle()(p) { val rd = Bits(5.W) val data = Bits(xLen.W) } class RoCCCoreIO(val nRoCCCSRs: Int = 0)(implicit p: Parameters) extends CoreBundle()(p) { val cmd = Flipped(Decoupled(new RoCCCommand)) val resp = Decoupled(new RoCCResponse) val mem = new HellaCacheIO val busy = Output(Bool()) val interrupt = Output(Bool()) val exception = Input(Bool()) val csrs = Flipped(Vec(nRoCCCSRs, new CustomCSRIO)) } class RoCCIO(val nPTWPorts: Int, nRoCCCSRs: Int)(implicit p: Parameters) extends RoCCCoreIO(nRoCCCSRs)(p) { val ptw = Vec(nPTWPorts, new TLBPTWIO) val fpu_req = Decoupled(new FPInput) val fpu_resp = Flipped(Decoupled(new FPResult)) } /** Base classes for Diplomatic TL2 RoCC units **/ abstract class LazyRoCC( val opcodes: OpcodeSet, val nPTWPorts: Int = 0, val usesFPU: Boolean = false, val roccCSRs: Seq[CustomCSR] = Nil )(implicit p: Parameters) extends LazyModule { val module: LazyRoCCModuleImp require(roccCSRs.map(_.id).toSet.size == roccCSRs.size) val atlNode: TLNode = TLIdentityNode() val tlNode: TLNode = TLIdentityNode() val stlNode: TLNode = TLIdentityNode() } class LazyRoCCModuleImp(outer: LazyRoCC) extends LazyModuleImp(outer) { val io = IO(new RoCCIO(outer.nPTWPorts, outer.roccCSRs.size)) io := DontCare } /** Mixins for including RoCC **/ trait HasLazyRoCC extends CanHavePTW { this: BaseTile => val roccs = p(BuildRoCC).map(_(p)) val roccCSRs = roccs.map(_.roccCSRs) // the set of custom CSRs requested by all roccs require(roccCSRs.flatten.map(_.id).toSet.size == roccCSRs.flatten.size, "LazyRoCC instantiations require overlapping CSRs") roccs.map(_.atlNode).foreach { atl => tlMasterXbar.node :=* atl } roccs.map(_.tlNode).foreach { tl => tlOtherMastersNode :=* tl } roccs.map(_.stlNode).foreach { stl => stl :*= tlSlaveXbar.node } nPTWPorts += roccs.map(_.nPTWPorts).sum nDCachePorts += roccs.size } trait HasLazyRoCCModule extends CanHavePTWModule with HasCoreParameters { this: RocketTileModuleImp => val (respArb, cmdRouter) = if(outer.roccs.nonEmpty) { val respArb = Module(new RRArbiter(new RoCCResponse()(outer.p), outer.roccs.size)) val cmdRouter = Module(new RoccCommandRouter(outer.roccs.map(_.opcodes))(outer.p)) outer.roccs.zipWithIndex.foreach { case (rocc, i) => rocc.module.io.ptw ++=: ptwPorts rocc.module.io.cmd <> cmdRouter.io.out(i) val dcIF = Module(new SimpleHellaCacheIF()(outer.p)) dcIF.io.requestor <> rocc.module.io.mem dcachePorts += dcIF.io.cache respArb.io.in(i) <> Queue(rocc.module.io.resp) } (Some(respArb), Some(cmdRouter)) } else { (None, None) } val roccCSRIOs = 
outer.roccs.map(_.module.io.csrs) } class AccumulatorExample(opcodes: OpcodeSet, val n: Int = 4)(implicit p: Parameters) extends LazyRoCC(opcodes) { override lazy val module = new AccumulatorExampleModuleImp(this) } class AccumulatorExampleModuleImp(outer: AccumulatorExample)(implicit p: Parameters) extends LazyRoCCModuleImp(outer) with HasCoreParameters { val regfile = Mem(outer.n, UInt(xLen.W)) val busy = RegInit(VecInit(Seq.fill(outer.n){false.B})) val cmd = Queue(io.cmd) val funct = cmd.bits.inst.funct val addr = cmd.bits.rs2(log2Up(outer.n)-1,0) val doWrite = funct === 0.U val doRead = funct === 1.U val doLoad = funct === 2.U val doAccum = funct === 3.U val memRespTag = io.mem.resp.bits.tag(log2Up(outer.n)-1,0) // datapath val addend = cmd.bits.rs1 val accum = regfile(addr) val wdata = Mux(doWrite, addend, accum + addend) when (cmd.fire && (doWrite || doAccum)) { regfile(addr) := wdata } when (io.mem.resp.valid) { regfile(memRespTag) := io.mem.resp.bits.data busy(memRespTag) := false.B } // control when (io.mem.req.fire) { busy(addr) := true.B } val doResp = cmd.bits.inst.xd val stallReg = busy(addr) val stallLoad = doLoad && !io.mem.req.ready val stallResp = doResp && !io.resp.ready cmd.ready := !stallReg && !stallLoad && !stallResp // command resolved if no stalls AND not issuing a load that will need a request // PROC RESPONSE INTERFACE io.resp.valid := cmd.valid && doResp && !stallReg && !stallLoad // valid response if valid command, need a response, and no stalls io.resp.bits.rd := cmd.bits.inst.rd // Must respond with the appropriate tag or undefined behavior io.resp.bits.data := accum // Semantics is to always send out prior accumulator register value io.busy := cmd.valid || busy.reduce(_||_) // Be busy when have pending memory requests or committed possibility of pending requests io.interrupt := false.B // Set this true to trigger an interrupt on the processor (please refer to supervisor documentation) // MEMORY REQUEST INTERFACE io.mem.req.valid := cmd.valid && doLoad && !stallReg && !stallResp io.mem.req.bits.addr := addend io.mem.req.bits.tag := addr io.mem.req.bits.cmd := M_XRD // perform a load (M_XWR for stores) io.mem.req.bits.size := log2Ceil(8).U io.mem.req.bits.signed := false.B io.mem.req.bits.data := 0.U // we're not performing any stores... 
io.mem.req.bits.phys := false.B io.mem.req.bits.dprv := cmd.bits.status.dprv io.mem.req.bits.dv := cmd.bits.status.dv io.mem.req.bits.no_resp := false.B } class TranslatorExample(opcodes: OpcodeSet)(implicit p: Parameters) extends LazyRoCC(opcodes, nPTWPorts = 1) { override lazy val module = new TranslatorExampleModuleImp(this) } class TranslatorExampleModuleImp(outer: TranslatorExample)(implicit p: Parameters) extends LazyRoCCModuleImp(outer) with HasCoreParameters { val req_addr = Reg(UInt(coreMaxAddrBits.W)) val req_rd = Reg(chiselTypeOf(io.resp.bits.rd)) val req_offset = req_addr(pgIdxBits - 1, 0) val req_vpn = req_addr(coreMaxAddrBits - 1, pgIdxBits) val pte = Reg(new PTE) val s_idle :: s_ptw_req :: s_ptw_resp :: s_resp :: Nil = Enum(4) val state = RegInit(s_idle) io.cmd.ready := (state === s_idle) when (io.cmd.fire) { req_rd := io.cmd.bits.inst.rd req_addr := io.cmd.bits.rs1 state := s_ptw_req } private val ptw = io.ptw(0) when (ptw.req.fire) { state := s_ptw_resp } when (state === s_ptw_resp && ptw.resp.valid) { pte := ptw.resp.bits.pte state := s_resp } when (io.resp.fire) { state := s_idle } ptw.req.valid := (state === s_ptw_req) ptw.req.bits.valid := true.B ptw.req.bits.bits.addr := req_vpn io.resp.valid := (state === s_resp) io.resp.bits.rd := req_rd io.resp.bits.data := Mux(pte.leaf(), Cat(pte.ppn, req_offset), -1.S(xLen.W).asUInt) io.busy := (state =/= s_idle) io.interrupt := false.B io.mem.req.valid := false.B } class CharacterCountExample(opcodes: OpcodeSet)(implicit p: Parameters) extends LazyRoCC(opcodes) { override lazy val module = new CharacterCountExampleModuleImp(this) override val atlNode = TLClientNode(Seq(TLMasterPortParameters.v1(Seq(TLMasterParameters.v1("CharacterCountRoCC"))))) } class CharacterCountExampleModuleImp(outer: CharacterCountExample)(implicit p: Parameters) extends LazyRoCCModuleImp(outer) with HasCoreParameters with HasL1CacheParameters { val cacheParams = tileParams.dcache.get private val blockOffset = blockOffBits private val beatOffset = log2Up(cacheDataBits/8) val needle = Reg(UInt(8.W)) val addr = Reg(UInt(coreMaxAddrBits.W)) val count = Reg(UInt(xLen.W)) val resp_rd = Reg(chiselTypeOf(io.resp.bits.rd)) val addr_block = addr(coreMaxAddrBits - 1, blockOffset) val offset = addr(blockOffset - 1, 0) val next_addr = (addr_block + 1.U) << blockOffset.U val s_idle :: s_acq :: s_gnt :: s_check :: s_resp :: Nil = Enum(5) val state = RegInit(s_idle) val (tl_out, edgesOut) = outer.atlNode.out(0) val gnt = tl_out.d.bits val recv_data = Reg(UInt(cacheDataBits.W)) val recv_beat = RegInit(0.U(log2Up(cacheDataBeats+1).W)) val data_bytes = VecInit(Seq.tabulate(cacheDataBits/8) { i => recv_data(8 * (i + 1) - 1, 8 * i) }) val zero_match = data_bytes.map(_ === 0.U) val needle_match = data_bytes.map(_ === needle) val first_zero = PriorityEncoder(zero_match) val chars_found = PopCount(needle_match.zipWithIndex.map { case (matches, i) => val idx = Cat(recv_beat - 1.U, i.U(beatOffset.W)) matches && idx >= offset && i.U <= first_zero }) val zero_found = zero_match.reduce(_ || _) val finished = Reg(Bool()) io.cmd.ready := (state === s_idle) io.resp.valid := (state === s_resp) io.resp.bits.rd := resp_rd io.resp.bits.data := count tl_out.a.valid := (state === s_acq) tl_out.a.bits := edgesOut.Get( fromSource = 0.U, toAddress = addr_block << blockOffset, lgSize = lgCacheBlockBytes.U)._2 tl_out.d.ready := (state === s_gnt) when (io.cmd.fire) { addr := io.cmd.bits.rs1 needle := io.cmd.bits.rs2 resp_rd := io.cmd.bits.inst.rd count := 0.U finished := false.B state := s_acq } 
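  // Added illustrative note (not in the original source): the scan is a small FSM that
  // walks whole cache blocks through the tile's atlNode TileLink client:
  //   s_idle --io.cmd.fire--> s_acq   (issue edgesOut.Get for addr_block, lgCacheBlockBytes)
  //   s_acq  --a.fire-------> s_gnt   (wait for the next data beat on channel D)
  //   s_gnt  --d.fire-------> s_check (count needle matches in recv_data)
  //   s_check loops back to s_gnt until cacheDataBeats beats have arrived, then moves to
  //   s_acq for the next block, or to s_resp once a zero byte terminates the string.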
when (tl_out.a.fire) { state := s_gnt } when (tl_out.d.fire) { recv_beat := recv_beat + 1.U recv_data := gnt.data state := s_check } when (state === s_check) { when (!finished) { count := count + chars_found } when (zero_found) { finished := true.B } when (recv_beat === cacheDataBeats.U) { addr := next_addr state := Mux(zero_found || finished, s_resp, s_acq) recv_beat := 0.U } .otherwise { state := s_gnt } } when (io.resp.fire) { state := s_idle } io.busy := (state =/= s_idle) io.interrupt := false.B io.mem.req.valid := false.B // Tie off unused channels tl_out.b.ready := true.B tl_out.c.valid := false.B tl_out.e.valid := false.B } class BlackBoxExample(opcodes: OpcodeSet, blackBoxFile: String)(implicit p: Parameters) extends LazyRoCC(opcodes) { override lazy val module = new BlackBoxExampleModuleImp(this, blackBoxFile) } class BlackBoxExampleModuleImp(outer: BlackBoxExample, blackBoxFile: String)(implicit p: Parameters) extends LazyRoCCModuleImp(outer) with RequireSyncReset with HasCoreParameters { val blackbox = { val roccIo = io Module( new BlackBox( Map( "xLen" -> IntParam(xLen), "PRV_SZ" -> IntParam(PRV.SZ), "coreMaxAddrBits" -> IntParam(coreMaxAddrBits), "dcacheReqTagBits" -> IntParam(roccIo.mem.req.bits.tag.getWidth), "M_SZ" -> IntParam(M_SZ), "mem_req_bits_size_width" -> IntParam(roccIo.mem.req.bits.size.getWidth), "coreDataBits" -> IntParam(coreDataBits), "coreDataBytes" -> IntParam(coreDataBytes), "paddrBits" -> IntParam(paddrBits), "vaddrBitsExtended" -> IntParam(vaddrBitsExtended), "FPConstants_RM_SZ" -> IntParam(FPConstants.RM_SZ), "fLen" -> IntParam(fLen), "FPConstants_FLAGS_SZ" -> IntParam(FPConstants.FLAGS_SZ) ) ) with HasBlackBoxResource { val io = IO( new Bundle { val clock = Input(Clock()) val reset = Input(Reset()) val rocc = chiselTypeOf(roccIo) }) override def desiredName: String = blackBoxFile addResource(s"/vsrc/$blackBoxFile.v") } ) } blackbox.io.clock := clock blackbox.io.reset := reset blackbox.io.rocc.cmd <> io.cmd io.resp <> blackbox.io.rocc.resp io.mem <> blackbox.io.rocc.mem io.busy := blackbox.io.rocc.busy io.interrupt := blackbox.io.rocc.interrupt blackbox.io.rocc.exception := io.exception io.ptw <> blackbox.io.rocc.ptw io.fpu_req <> blackbox.io.rocc.fpu_req blackbox.io.rocc.fpu_resp <> io.fpu_resp } class OpcodeSet(val opcodes: Seq[UInt]) { def |(set: OpcodeSet) = new OpcodeSet(this.opcodes ++ set.opcodes) def matches(oc: UInt) = opcodes.map(_ === oc).reduce(_ || _) } object OpcodeSet { def custom0 = new OpcodeSet(Seq("b0001011".U)) def custom1 = new OpcodeSet(Seq("b0101011".U)) def custom2 = new OpcodeSet(Seq("b1011011".U)) def custom3 = new OpcodeSet(Seq("b1111011".U)) def all = custom0 | custom1 | custom2 | custom3 } class RoccCommandRouter(opcodes: Seq[OpcodeSet])(implicit p: Parameters) extends CoreModule()(p) { val io = IO(new Bundle { val in = Flipped(Decoupled(new RoCCCommand)) val out = Vec(opcodes.size, Decoupled(new RoCCCommand)) val busy = Output(Bool()) }) val cmd = Queue(io.in) val cmdReadys = io.out.zip(opcodes).map { case (out, opcode) => val me = opcode.matches(cmd.bits.inst.opcode) out.valid := cmd.valid && me out.bits := cmd.bits out.ready && me } cmd.ready := cmdReadys.reduce(_ || _) io.busy := cmd.valid assert(PopCount(cmdReadys) <= 1.U, "Custom opcode matched for more than one accelerator") } File BaseTile.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tile import chisel3._ import chisel3.util.{log2Ceil, log2Up} import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.bundlebridge._ import freechips.rocketchip.resources.{PropertyMap, PropertyOption, ResourceReference, DTSTimebase} import freechips.rocketchip.interrupts.{IntInwardNode, IntOutwardNode} import freechips.rocketchip.rocket.{ICacheParams, DCacheParams, BTBParams, ASIdBits, VMIdBits, TraceAux, BPWatch} import freechips.rocketchip.subsystem.{ HierarchicalElementParams, InstantiableHierarchicalElementParams, HierarchicalElementCrossingParamsLike, CacheBlockBytes, SystemBusKey, BaseHierarchicalElement, InsertTimingClosureRegistersOnHartIds, BaseHierarchicalElementModuleImp } import freechips.rocketchip.tilelink.{TLEphemeralNode, TLOutwardNode, TLNode, TLFragmenter, EarlyAck, TLWidthWidget, TLManagerParameters, ManagerUnification} import freechips.rocketchip.prci.{ClockCrossingType, ClockSinkParameters} import freechips.rocketchip.util.{TraceCoreParams, TraceCoreInterface} import freechips.rocketchip.resources.{BigIntToProperty, IntToProperty, StringToProperty} import freechips.rocketchip.util.BooleanToAugmentedBoolean case object TileVisibilityNodeKey extends Field[TLEphemeralNode] case object TileKey extends Field[TileParams] case object LookupByHartId extends Field[LookupByHartIdImpl] trait TileParams extends HierarchicalElementParams { val core: CoreParams val icache: Option[ICacheParams] val dcache: Option[DCacheParams] val btb: Option[BTBParams] val tileId: Int // may not be hartid val blockerCtrlAddr: Option[BigInt] } abstract class InstantiableTileParams[TileType <: BaseTile] extends InstantiableHierarchicalElementParams[TileType] with TileParams { def instantiate(crossing: HierarchicalElementCrossingParamsLike, lookup: LookupByHartIdImpl) (implicit p: Parameters): TileType } /** These parameters values are not computed based on diplomacy negotiation * and so are safe to use while diplomacy itself is running. 
*/ trait HasNonDiplomaticTileParameters { implicit val p: Parameters def tileParams: TileParams = p(TileKey) def usingVM: Boolean = tileParams.core.useVM def usingUser: Boolean = tileParams.core.useUser || usingSupervisor def usingSupervisor: Boolean = tileParams.core.hasSupervisorMode def usingHypervisor: Boolean = usingVM && tileParams.core.useHypervisor def usingDebug: Boolean = tileParams.core.useDebug def usingRoCC: Boolean = !p(BuildRoCC).isEmpty def usingBTB: Boolean = tileParams.btb.isDefined && tileParams.btb.get.nEntries > 0 def usingPTW: Boolean = usingVM def usingDataScratchpad: Boolean = tileParams.dcache.flatMap(_.scratch).isDefined def xLen: Int = tileParams.core.xLen def xBytes: Int = xLen / 8 def iLen: Int = 32 def pgIdxBits: Int = 12 def pgLevelBits: Int = 10 - log2Ceil(xLen / 32) def pgLevels: Int = tileParams.core.pgLevels def maxSVAddrBits: Int = pgIdxBits + pgLevels * pgLevelBits def maxHypervisorExtraAddrBits: Int = 2 def hypervisorExtraAddrBits: Int = { if (usingHypervisor) maxHypervisorExtraAddrBits else 0 } def maxHVAddrBits: Int = maxSVAddrBits + hypervisorExtraAddrBits def minPgLevels: Int = { val res = xLen match { case 32 => 2; case 64 => 3 } require(pgLevels >= res) res } def asIdBits: Int = p(ASIdBits) def vmIdBits: Int = p(VMIdBits) lazy val maxPAddrBits: Int = { require(xLen == 32 || xLen == 64, s"Only XLENs of 32 or 64 are supported, but got $xLen") xLen match { case 32 => 34; case 64 => 56 } } def tileId: Int = tileParams.tileId def cacheBlockBytes = p(CacheBlockBytes) def lgCacheBlockBytes = log2Up(cacheBlockBytes) def masterPortBeatBytes = p(SystemBusKey).beatBytes // TODO make HellaCacheIO diplomatic and remove this brittle collection of hacks // Core PTW DTIM coprocessors def dcacheArbPorts = 1 + usingVM.toInt + usingDataScratchpad.toInt + p(BuildRoCC).size + (tileParams.core.useVector && tileParams.core.vectorUseDCache).toInt // TODO merge with isaString in CSR.scala def isaDTS: String = { val ie = if (tileParams.core.useRVE) "e" else "i" val m = if (tileParams.core.mulDiv.nonEmpty) "m" else "" val a = if (tileParams.core.useAtomics) "a" else "" val f = if (tileParams.core.fpu.nonEmpty) "f" else "" val d = if (tileParams.core.fpu.nonEmpty && tileParams.core.fpu.get.fLen > 32) "d" else "" val c = if (tileParams.core.useCompressed) "c" else "" val b = if (tileParams.core.useBitmanip) "b" else "" val v = if (tileParams.core.useVector && tileParams.core.vLen >= 128 && tileParams.core.eLen == 64 && tileParams.core.vfLen == 64) "v" else "" val h = if (usingHypervisor) "h" else "" val ext_strs = Seq( (tileParams.core.useVector) -> s"zvl${tileParams.core.vLen}b", (tileParams.core.useVector) -> { val c = tileParams.core.vfLen match { case 64 => "d" case 32 => "f" case 0 => "x" } s"zve${tileParams.core.eLen}$c" }, (tileParams.core.useVector && tileParams.core.vfh) -> "zvfh", (tileParams.core.fpu.map(_.fLen >= 16).getOrElse(false) && tileParams.core.minFLen <= 16) -> "zfh", (tileParams.core.useZba) -> "zba", (tileParams.core.useZbb) -> "zbb", (tileParams.core.useZbs) -> "zbs", (tileParams.core.useConditionalZero) -> "zicond" ).filter(_._1).map(_._2) val multiLetterExt = ( // rdcycle[h], rdinstret[h] is implemented // rdtime[h] is not implemented, and could be provided by software emulation // see https://github.com/chipsalliance/rocket-chip/issues/3207 //Some(Seq("zicntr")) ++ Some(Seq("zicsr", "zifencei", "zihpm")) ++ Some(ext_strs) ++ Some(tileParams.core.vExts) ++ tileParams.core.customIsaExt.map(Seq(_)) ).flatten val multiLetterString = 
multiLetterExt.mkString("_") s"rv$xLen$ie$m$a$f$d$c$b$v$h$multiLetterString" } def tileProperties: PropertyMap = { val dcache = tileParams.dcache.filter(!_.scratch.isDefined).map(d => Map( "d-cache-block-size" -> cacheBlockBytes.asProperty, "d-cache-sets" -> d.nSets.asProperty, "d-cache-size" -> (d.nSets * d.nWays * cacheBlockBytes).asProperty) ).getOrElse(Nil) val incoherent = if (!tileParams.core.useAtomicsOnlyForIO) Nil else Map( "sifive,d-cache-incoherent" -> Nil) val icache = tileParams.icache.map(i => Map( "i-cache-block-size" -> cacheBlockBytes.asProperty, "i-cache-sets" -> i.nSets.asProperty, "i-cache-size" -> (i.nSets * i.nWays * cacheBlockBytes).asProperty) ).getOrElse(Nil) val dtlb = tileParams.dcache.filter(_ => tileParams.core.useVM).map(d => Map( "d-tlb-size" -> (d.nTLBWays * d.nTLBSets).asProperty, "d-tlb-sets" -> d.nTLBSets.asProperty)).getOrElse(Nil) val itlb = tileParams.icache.filter(_ => tileParams.core.useVM).map(i => Map( "i-tlb-size" -> (i.nTLBWays * i.nTLBSets).asProperty, "i-tlb-sets" -> i.nTLBSets.asProperty)).getOrElse(Nil) val mmu = if (tileParams.core.useVM) { if (tileParams.core.useHypervisor) { Map("tlb-split" -> Nil, "mmu-type" -> s"riscv,sv${maxSVAddrBits},sv${maxSVAddrBits}x4".asProperty) } else { Map("tlb-split" -> Nil, "mmu-type" -> s"riscv,sv$maxSVAddrBits".asProperty) } } else { Nil } val pmp = if (tileParams.core.nPMPs > 0) Map( "riscv,pmpregions" -> tileParams.core.nPMPs.asProperty, "riscv,pmpgranularity" -> tileParams.core.pmpGranularity.asProperty) else Nil dcache ++ icache ++ dtlb ++ itlb ++ mmu ++ pmp ++ incoherent } } /** These parameters values are computed based on diplomacy negotiations * and so are NOT safe to use while diplomacy itself is running. * Only mix this trait into LazyModuleImps, Modules, Bundles, Data, etc. */ trait HasTileParameters extends HasNonDiplomaticTileParameters { protected def tlBundleParams = p(TileVisibilityNodeKey).edges.out.head.bundle lazy val paddrBits: Int = { val bits = tlBundleParams.addressBits require(bits <= maxPAddrBits, s"Requested $bits paddr bits, but since xLen is $xLen only $maxPAddrBits will fit") bits } def vaddrBits: Int = if (usingVM) { val v = maxHVAddrBits require(v == xLen || xLen > v && v > paddrBits) v } else { // since virtual addresses sign-extend but physical addresses // zero-extend, make room for a zero sign bit for physical addresses (paddrBits + 1) min xLen } def vpnBits: Int = vaddrBits - pgIdxBits def ppnBits: Int = paddrBits - pgIdxBits def vpnBitsExtended: Int = vpnBits + (if (vaddrBits < xLen) 1 + usingHypervisor.toInt else 0) def vaddrBitsExtended: Int = vpnBitsExtended + pgIdxBits } /** Base class for all Tiles that use TileLink */ abstract class BaseTile private (crossing: ClockCrossingType, q: Parameters) extends BaseHierarchicalElement(crossing)(q) with HasNonDiplomaticTileParameters { // Public constructor alters Parameters to supply some legacy compatibility keys def this(tileParams: TileParams, crossing: ClockCrossingType, lookup: LookupByHartIdImpl, p: Parameters) = { this(crossing, p.alterMap(Map( TileKey -> tileParams, TileVisibilityNodeKey -> TLEphemeralNode()(ValName("tile_master")), LookupByHartId -> lookup ))) } def intInwardNode: IntInwardNode // Interrupts to the core from external devices def intOutwardNode: Option[IntOutwardNode] // Interrupts from tile-internal devices (e.g. 
BEU) def haltNode: IntOutwardNode // Unrecoverable error has occurred; suggest reset def ceaseNode: IntOutwardNode // Tile has ceased to retire instructions def wfiNode: IntOutwardNode // Tile is waiting for an interrupt def module: BaseTileModuleImp[BaseTile] /** Node for broadcasting a hart id to diplomatic consumers within the tile. */ val hartIdNexusNode: BundleBridgeNode[UInt] = BundleBroadcast[UInt](registered = p(InsertTimingClosureRegistersOnHartIds)) /** Node for consuming the hart id input in tile-layer Chisel logic. */ val hartIdSinkNode = BundleBridgeSink[UInt]() /** Node for driving a hart id input, which is to be broadcast to units within the tile. * * Making this id value an IO and then using it to do lookups of information * that would make otherwise-homogeneous tiles heterogeneous is a useful trick * to enable deduplication of tiles for hierarchical P&R flows. */ val hartIdNode: BundleBridgeInwardNode[UInt] = hartIdSinkNode := hartIdNexusNode := BundleBridgeNameNode("hartid") /** Node for broadcasting a reset vector to diplomatic consumers within the tile. */ val resetVectorNexusNode: BundleBridgeNode[UInt] = BundleBroadcast[UInt]() /** Node for consuming the reset vector input in tile-layer Chisel logic. * * Its width is sized by looking at the size of the address space visible * on the tile's master ports, but this lookup is not evaluated until * diplomacy has completed and Chisel elaboration has begun. */ val resetVectorSinkNode = BundleBridgeSink[UInt](Some(() => UInt(visiblePhysAddrBits.W))) /** Node for supplying a reset vector that processors in this tile might begin fetching instructions from as they come out of reset. */ val resetVectorNode: BundleBridgeInwardNode[UInt] = resetVectorSinkNode := resetVectorNexusNode := BundleBridgeNameNode("reset_vector") /** Nodes for connecting NMI interrupt sources and vectors into the tile */ val nmiSinkNode = Option.when(tileParams.core.useNMI) { BundleBridgeSink[NMI](Some(() => new NMI(visiblePhysAddrBits))) } val nmiNode: Option[BundleBridgeInwardNode[NMI]] = nmiSinkNode.map(_ := BundleBridgeNameNode("nmi")) /** Node for broadcasting an address prefix to diplomatic consumers within the tile. * * The prefix should be applied by consumers by or-ing ouputs of this node * with a static base address (which is looked up based on the driven hartid value). */ val mmioAddressPrefixNexusNode = BundleBridgeNexus[UInt]( inputFn = BundleBridgeNexus.orReduction[UInt](registered = false) _, outputFn = BundleBridgeNexus.fillN[UInt](registered = false) _, default = Some(() => 0.U(1.W)) ) /** Node for external drivers to prefix base addresses of MMIO devices to which the core has a direct access path. */ val mmioAddressPrefixNode: BundleBridgeInwardNode[UInt] = mmioAddressPrefixNexusNode :=* BundleBridgeNameNode("mmio_address_prefix") // TODO: Any node marked "consumed by the core" or "driven by the core" // should be moved to either be: a member of a specific BaseTile subclass, // or actually just a member of the core's LazyModule itself, // assuming the core itself is diplomatic. // Then these nodes should just become IdentityNodes of their respective type protected def traceRetireWidth = tileParams.core.retireWidth /** Node for the core to drive legacy "raw" instruction trace. */ val traceSourceNode = BundleBridgeSource(() => new TraceBundle) /** Node for external consumers to source a legacy instruction trace from the core. 
*/ val traceNode = traceSourceNode def traceCoreParams = new TraceCoreParams() /** Node for core to drive instruction trace conforming to RISC-V Processor Trace spec V1.0 */ val traceCoreSourceNode = BundleBridgeSource(() => new TraceCoreInterface(traceCoreParams)) /** Node for external consumers to source a V1.0 instruction trace from the core. */ val traceCoreNode = traceCoreSourceNode /** Node to broadcast collected trace sideband signals into the tile. */ val traceAuxNexusNode = BundleBridgeNexus[TraceAux](default = Some(() => { val aux = Wire(new TraceAux) aux.stall := false.B aux.enable := false.B aux })) /** Trace sideband signals to be consumed by the core. */ val traceAuxSinkNode = BundleBridgeSink[TraceAux]() /** Trace sideband signals collected here to be driven into the tile. */ val traceAuxNode: BundleBridgeInwardNode[TraceAux] = traceAuxSinkNode := traceAuxNexusNode :=* BundleBridgeNameNode("trace_aux") /** Node for watchpoints to control trace driven by core. */ val bpwatchSourceNode = BundleBridgeSource(() => Vec(tileParams.core.nBreakpoints, new BPWatch(traceRetireWidth))) /** Node to broadcast watchpoints to control trace. */ val bpwatchNexusNode = BundleBroadcast[Vec[BPWatch]]() /** Node for external consumers to source watchpoints to control trace. */ val bpwatchNode: BundleBridgeOutwardNode[Vec[BPWatch]] = BundleBridgeNameNode("bpwatch") :*= bpwatchNexusNode := bpwatchSourceNode /** Helper function for connecting MMIO devices inside the tile to an xbar that will make them visible to external masters. */ def connectTLSlave(xbarNode: TLOutwardNode, node: TLNode, bytes: Int): Unit = { DisableMonitors { implicit p => (Seq(node, TLFragmenter(bytes, cacheBlockBytes, earlyAck=EarlyAck.PutFulls)) ++ (xBytes != bytes).option(TLWidthWidget(xBytes))) .foldRight(xbarNode)(_ :*= _) } } def connectTLSlave(node: TLNode, bytes: Int): Unit = { connectTLSlave(tlSlaveXbar.node, node, bytes) } /** TileLink node which represents the view that the intra-tile masters have of the rest of the system. */ val visibilityNode = p(TileVisibilityNodeKey) protected def visibleManagers = visibilityNode.edges.out.flatMap(_.manager.managers) protected def visiblePhysAddrBits = visibilityNode.edges.out.head.bundle.addressBits def unifyManagers: List[TLManagerParameters] = ManagerUnification(visibleManagers) /** Finds resource labels for all the outward caches. */ def nextLevelCacheProperty: PropertyOption = { val outer = visibleManagers .filter(_.supportsAcquireB) .flatMap(_.resources.headOption) .map(_.owner.label) .distinct if (outer.isEmpty) None else Some("next-level-cache" -> outer.map(l => ResourceReference(l)).toList) } /** Create a DTS representation of this "cpu". */ def cpuProperties: PropertyMap = Map( "device_type" -> "cpu".asProperty, "status" -> "okay".asProperty, "clock-frequency" -> tileParams.core.bootFreqHz.asProperty, "riscv,isa" -> isaDTS.asProperty, "timebase-frequency" -> p(DTSTimebase).asProperty, "hardware-exec-breakpoint-count" -> tileParams.core.nBreakpoints.asProperty ) /** Can be used to access derived params calculated by HasCoreParameters * * However, callers must ensure they do not access a diplomatically-determined parameter * before the graph in question has been fully connected. 
*/ protected lazy val lazyCoreParamsView: HasCoreParameters = { class C(implicit val p: Parameters) extends HasCoreParameters new C } this.suggestName(tileParams.baseName) } abstract class BaseTileModuleImp[+L <: BaseTile](outer: L) extends BaseHierarchicalElementModuleImp[L](outer) File HellaCache.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. package freechips.rocketchip.rocket import chisel3.{dontTouch, _} import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.bundlebridge._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.amba.AMBAProtField import freechips.rocketchip.diplomacy.{IdRange, TransferSizes, RegionType} import freechips.rocketchip.tile.{L1CacheParams, HasL1CacheParameters, HasCoreParameters, CoreBundle, HasNonDiplomaticTileParameters, BaseTile, HasTileParameters} import freechips.rocketchip.tilelink.{TLMasterParameters, TLClientNode, TLMasterPortParameters, TLEdgeOut, TLWidthWidget, TLFIFOFixer, ClientMetadata} import freechips.rocketchip.util.{Code, RandomReplacement, ParameterizedBundle} import freechips.rocketchip.util.{BooleanToAugmentedBoolean, IntToAugmentedInt} import scala.collection.mutable.ListBuffer case class DCacheParams( nSets: Int = 64, nWays: Int = 4, rowBits: Int = 64, subWordBits: Option[Int] = None, replacementPolicy: String = "random", nTLBSets: Int = 1, nTLBWays: Int = 32, nTLBBasePageSectors: Int = 4, nTLBSuperpages: Int = 4, tagECC: Option[String] = None, dataECC: Option[String] = None, dataECCBytes: Int = 1, nMSHRs: Int = 1, nSDQ: Int = 17, nRPQ: Int = 16, nMMIOs: Int = 1, blockBytes: Int = 64, separateUncachedResp: Boolean = false, acquireBeforeRelease: Boolean = false, pipelineWayMux: Boolean = false, clockGate: Boolean = false, scratch: Option[BigInt] = None) extends L1CacheParams { def tagCode: Code = Code.fromString(tagECC) def dataCode: Code = Code.fromString(dataECC) def dataScratchpadBytes: Int = scratch.map(_ => nSets*blockBytes).getOrElse(0) def replacement = new RandomReplacement(nWays) def silentDrop: Boolean = !acquireBeforeRelease require((!scratch.isDefined || nWays == 1), "Scratchpad only allowed in direct-mapped cache.") require((!scratch.isDefined || nMSHRs == 0), "Scratchpad only allowed in blocking cache.") if (scratch.isEmpty) require(isPow2(nSets), s"nSets($nSets) must be pow2") } trait HasL1HellaCacheParameters extends HasL1CacheParameters with HasCoreParameters { val cacheParams = tileParams.dcache.get val cfg = cacheParams def wordBits = coreDataBits def wordBytes = coreDataBytes def subWordBits = cacheParams.subWordBits.getOrElse(wordBits) def subWordBytes = subWordBits / 8 def wordOffBits = log2Up(wordBytes) def beatBytes = cacheBlockBytes / cacheDataBeats def beatWords = beatBytes / wordBytes def beatOffBits = log2Up(beatBytes) def idxMSB = untagBits-1 def idxLSB = blockOffBits def offsetmsb = idxLSB-1 def offsetlsb = wordOffBits def rowWords = rowBits/wordBits def doNarrowRead = coreDataBits * nWays % rowBits == 0 def eccBytes = cacheParams.dataECCBytes val eccBits = cacheParams.dataECCBytes * 8 val encBits = cacheParams.dataCode.width(eccBits) val encWordBits = encBits * (wordBits / eccBits) def encDataBits = cacheParams.dataCode.width(coreDataBits) // NBDCache only def encRowBits = encDataBits*rowWords def lrscCycles = coreParams.lrscCycles // ISA requires 16-insn LRSC sequences to succeed def lrscBackoff = 3 // disallow LRSC reacquisition briefly def blockProbeAfterGrantCycles = 8 // give the processor some 
time to issue a request after a grant def nIOMSHRs = cacheParams.nMMIOs def maxUncachedInFlight = cacheParams.nMMIOs def dataScratchpadSize = cacheParams.dataScratchpadBytes require(rowBits >= coreDataBits, s"rowBits($rowBits) < coreDataBits($coreDataBits)") if (!usingDataScratchpad) require(rowBits == cacheDataBits, s"rowBits($rowBits) != cacheDataBits($cacheDataBits)") // would need offset addr for puts if data width < xlen require(xLen <= cacheDataBits, s"xLen($xLen) > cacheDataBits($cacheDataBits)") } abstract class L1HellaCacheModule(implicit val p: Parameters) extends Module with HasL1HellaCacheParameters abstract class L1HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p) with HasL1HellaCacheParameters /** Bundle definitions for HellaCache interfaces */ trait HasCoreMemOp extends HasL1HellaCacheParameters { val addr = UInt(coreMaxAddrBits.W) val idx = (usingVM && untagBits > pgIdxBits).option(UInt(coreMaxAddrBits.W)) val tag = UInt((coreParams.dcacheReqTagBits + log2Ceil(dcacheArbPorts)).W) val cmd = UInt(M_SZ.W) val size = UInt(log2Ceil(coreDataBytes.log2 + 1).W) val signed = Bool() val dprv = UInt(PRV.SZ.W) val dv = Bool() } trait HasCoreData extends HasCoreParameters { val data = UInt(coreDataBits.W) val mask = UInt(coreDataBytes.W) } class HellaCacheReqInternal(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp { val phys = Bool() val no_resp = Bool() // The dcache may omit generating a response for this request val no_alloc = Bool() val no_xcpt = Bool() } class HellaCacheReq(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData class HellaCacheResp(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp with HasCoreData { val replay = Bool() val has_data = Bool() val data_word_bypass = UInt(coreDataBits.W) val data_raw = UInt(coreDataBits.W) val store_data = UInt(coreDataBits.W) } class AlignmentExceptions extends Bundle { val ld = Bool() val st = Bool() } class HellaCacheExceptions extends Bundle { val ma = new AlignmentExceptions val pf = new AlignmentExceptions val gf = new AlignmentExceptions val ae = new AlignmentExceptions } class HellaCacheWriteData(implicit p: Parameters) extends CoreBundle()(p) with HasCoreData class HellaCachePerfEvents extends Bundle { val acquire = Bool() val release = Bool() val grant = Bool() val tlbMiss = Bool() val blocked = Bool() val canAcceptStoreThenLoad = Bool() val canAcceptStoreThenRMW = Bool() val canAcceptLoadThenLoad = Bool() val storeBufferEmptyAfterLoad = Bool() val storeBufferEmptyAfterStore = Bool() } // interface between D$ and processor/DTLB class HellaCacheIO(implicit p: Parameters) extends CoreBundle()(p) { val req = Decoupled(new HellaCacheReq) val s1_kill = Output(Bool()) // kill previous cycle's req val s1_data = Output(new HellaCacheWriteData()) // data for previous cycle's req val s2_nack = Input(Bool()) // req from two cycles ago is rejected val s2_nack_cause_raw = Input(Bool()) // reason for nack is store-load RAW hazard (performance hint) val s2_kill = Output(Bool()) // kill req from two cycles ago val s2_uncached = Input(Bool()) // advisory signal that the access is MMIO val s2_paddr = Input(UInt(paddrBits.W)) // translated address val resp = Flipped(Valid(new HellaCacheResp)) val replay_next = Input(Bool()) val s2_xcpt = Input(new HellaCacheExceptions) val s2_gpa = Input(UInt(vaddrBitsExtended.W)) val s2_gpa_is_pte = Input(Bool()) val uncached_resp = tileParams.dcache.get.separateUncachedResp.option(Flipped(Decoupled(new HellaCacheResp))) val 
ordered = Input(Bool()) val store_pending = Input(Bool()) // there is a store in a store buffer somewhere val perf = Input(new HellaCachePerfEvents()) val keep_clock_enabled = Output(Bool()) // should D$ avoid clock-gating itself? val clock_enabled = Input(Bool()) // is D$ currently being clocked? } /** Base classes for Diplomatic TL2 HellaCaches */ abstract class HellaCache(tileId: Int)(implicit p: Parameters) extends LazyModule with HasNonDiplomaticTileParameters { protected val cfg = tileParams.dcache.get protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1( name = s"Core ${tileId} DCache", sourceId = IdRange(0, 1 max cfg.nMSHRs), supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes)))) protected def mmioClientParameters = Seq(TLMasterParameters.v1( name = s"Core ${tileId} DCache MMIO", sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs), requestFifo = true)) def firstMMIO = (cacheClientParameters.map(_.sourceId.end) :+ 0).max val node = TLClientNode(Seq(TLMasterPortParameters.v1( clients = cacheClientParameters ++ mmioClientParameters, minLatency = 1, requestFields = tileParams.core.useVM.option(Seq()).getOrElse(Seq(AMBAProtField()))))) val hartIdSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]()) val mmioAddressPrefixSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]()) val module: HellaCacheModule def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireB || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT) def canSupportCFlushLine = !usingVM || cfg.blockBytes * cfg.nSets <= (1 << pgIdxBits) require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$") } class HellaCacheBundle(implicit p: Parameters) extends CoreBundle()(p) { val cpu = Flipped(new HellaCacheIO) val ptw = new TLBPTWIO() val errors = new DCacheErrors val tlb_port = new DCacheTLBPort } class HellaCacheModule(outer: HellaCache) extends LazyModuleImp(outer) with HasL1HellaCacheParameters { implicit val edge: TLEdgeOut = outer.node.edges.out(0) val (tl_out, _) = outer.node.out(0) val io = IO(new HellaCacheBundle) val io_hartid = outer.hartIdSinkNodeOpt.map(_.bundle) val io_mmio_address_prefix = outer.mmioAddressPrefixSinkNodeOpt.map(_.bundle) dontTouch(io.cpu.resp) // Users like to monitor these fields even if the core ignores some signals dontTouch(io.cpu.s1_data) require(rowBits == edge.bundle.dataBits) private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile) fifoManagers.foreach { m => require (m.fifoId == fifoManagers.head.fifoId, s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees\n"+ s"${m.nodePath.map(_.name)}\nversus\n${fifoManagers.head.nodePath.map(_.name)}") } } /** Support overriding which HellaCache is instantiated */ case object BuildHellaCache extends Field[BaseTile => Parameters => HellaCache](HellaCacheFactory.apply) object HellaCacheFactory { def apply(tile: BaseTile)(p: Parameters): HellaCache = { if (tile.tileParams.dcache.get.nMSHRs == 0) new DCache(tile.tileId, tile.crossing)(p) else new NonBlockingDCache(tile.tileId)(p) } } /** Mix-ins for constructing tiles that have a HellaCache */ trait HasHellaCache { this: BaseTile => val module: HasHellaCacheModule implicit val p: Parameters var nDCachePorts = 0 lazy val dcache: HellaCache = LazyModule(p(BuildHellaCache)(this)(p)) tlMasterXbar.node := TLWidthWidget(tileParams.dcache.get.rowBits/8) := 
dcache.node dcache.hartIdSinkNodeOpt.map { _ := hartIdNexusNode } dcache.mmioAddressPrefixSinkNodeOpt.map { _ := mmioAddressPrefixNexusNode } InModuleBody { dcache.module.io.tlb_port := DontCare } } trait HasHellaCacheModule { val outer: HasHellaCache with HasTileParameters implicit val p: Parameters val dcachePorts = ListBuffer[HellaCacheIO]() val dcacheArb = Module(new HellaCacheArbiter(outer.nDCachePorts)(outer.p)) outer.dcache.module.io.cpu <> dcacheArb.io.mem } /** Metadata array used for all HellaCaches */ class L1Metadata(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val coh = new ClientMetadata val tag = UInt(tagBits.W) } object L1Metadata { def apply(tag: Bits, coh: ClientMetadata)(implicit p: Parameters) = { val meta = Wire(new L1Metadata) meta.tag := tag meta.coh := coh meta } } class L1MetaReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val idx = UInt(idxBits.W) val way_en = UInt(nWays.W) val tag = UInt(tagBits.W) } class L1MetaWriteReq(implicit p: Parameters) extends L1MetaReadReq()(p) { val data = new L1Metadata } class L1MetadataArray[T <: L1Metadata](onReset: () => T)(implicit p: Parameters) extends L1HellaCacheModule()(p) { val rstVal = onReset() val io = IO(new Bundle { val read = Flipped(Decoupled(new L1MetaReadReq)) val write = Flipped(Decoupled(new L1MetaWriteReq)) val resp = Output(Vec(nWays, rstVal.cloneType)) }) val rst_cnt = RegInit(0.U(log2Up(nSets+1).W)) val rst = rst_cnt < nSets.U val waddr = Mux(rst, rst_cnt, io.write.bits.idx) val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt val wmask = Mux(rst || (nWays == 1).B, (-1).S, io.write.bits.way_en.asSInt).asBools val rmask = Mux(rst || (nWays == 1).B, (-1).S, io.read.bits.way_en.asSInt).asBools when (rst) { rst_cnt := rst_cnt+1.U } val metabits = rstVal.getWidth val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W))) val wen = rst || io.write.valid when (wen) { tag_array.write(waddr, VecInit.fill(nWays)(wdata), wmask) } io.resp := tag_array.read(io.read.bits.idx, io.read.fire).map(_.asTypeOf(chiselTypeOf(rstVal))) io.read.ready := !wen // so really this could be a 6T RAM io.write.ready := !rst } File Interrupts.scala: // See LICENSE.SiFive for license details. 
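// Informal overview of this file: TileInterrupts bundles the tile's interrupt inputs
// (debug, msip, mtip, meip, optional seip, local interrupts, and an optional NMI),
// SinksExternalInterrupts maps them onto CSR interrupt numbers via csrIntMap (e.g. with
// a supervisor and two local interrupts: List(65535, 3, 7, 11, 9, 16, 17)), and
// SourcesExternalNotifications exposes halt / cease / wfi notification sources back to
// the subsystem.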
package freechips.rocketchip.tile import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.resources.{Device, DeviceSnippet, Description, ResourceBinding, ResourceInt} import freechips.rocketchip.interrupts.{IntIdentityNode, IntSinkNode, IntSinkPortSimple, IntSourceNode, IntSourcePortSimple} import freechips.rocketchip.util.CanHaveErrors import freechips.rocketchip.resources.{IntToProperty, StringToProperty} import freechips.rocketchip.util.BooleanToAugmentedBoolean class NMI(val w: Int) extends Bundle { val rnmi = Bool() val rnmi_interrupt_vector = UInt(w.W) val rnmi_exception_vector = UInt(w.W) } class TileInterrupts(implicit p: Parameters) extends CoreBundle()(p) { val debug = Bool() val mtip = Bool() val msip = Bool() val meip = Bool() val seip = usingSupervisor.option(Bool()) val lip = Vec(coreParams.nLocalInterrupts, Bool()) val nmi = usingNMI.option(new NMI(resetVectorLen)) } // Use diplomatic interrupts to external interrupts from the subsystem into the tile trait SinksExternalInterrupts { this: BaseTile => val intInwardNode = intXbar.intnode :=* IntIdentityNode()(ValName("int_local")) protected val intSinkNode = IntSinkNode(IntSinkPortSimple()) intSinkNode := intXbar.intnode def cpuDevice: Device val intcDevice = new DeviceSnippet { override def parent = Some(cpuDevice) def describe(): Description = { Description("interrupt-controller", Map( "compatible" -> "riscv,cpu-intc".asProperty, "interrupt-controller" -> Nil, "#interrupt-cells" -> 1.asProperty)) } } ResourceBinding { intSinkNode.edges.in.flatMap(_.source.sources).map { case s => for (i <- s.range.start until s.range.end) { csrIntMap.lift(i).foreach { j => s.resources.foreach { r => r.bind(intcDevice, ResourceInt(j)) } } } } } // TODO: the order of the following two functions must match, and // also match the order which things are connected to the // per-tile crossbar in subsystem.HasTiles.connectInterrupts // debug, msip, mtip, meip, seip, lip offsets in CSRs def csrIntMap: List[Int] = { val nlips = tileParams.core.nLocalInterrupts val seip = if (usingSupervisor) Seq(9) else Nil List(65535, 3, 7, 11) ++ seip ++ List.tabulate(nlips)(_ + 16) } // go from flat diplomatic Interrupts to bundled TileInterrupts def decodeCoreInterrupts(core: TileInterrupts): Unit = { val async_ips = Seq(core.debug) val periph_ips = Seq( core.msip, core.mtip, core.meip) val seip = if (core.seip.isDefined) Seq(core.seip.get) else Nil val core_ips = core.lip val (interrupts, _) = intSinkNode.in(0) (async_ips ++ periph_ips ++ seip ++ core_ips).zip(interrupts).foreach { case(c, i) => c := i } } } trait SourcesExternalNotifications { this: BaseTile => // Report unrecoverable error conditions val haltNode = IntSourceNode(IntSourcePortSimple()) def reportHalt(could_halt: Option[Bool]): Unit = { val (halt_and_catch_fire, _) = haltNode.out(0) halt_and_catch_fire(0) := could_halt.map(RegEnable(true.B, false.B, _)).getOrElse(false.B) } def reportHalt(errors: Seq[CanHaveErrors]): Unit = { reportHalt(errors.flatMap(_.uncorrectable).map(_.valid).reduceOption(_||_)) } // Report when the tile has ceased to retire instructions val ceaseNode = IntSourceNode(IntSourcePortSimple()) def reportCease(could_cease: Option[Bool], quiescenceCycles: Int = 8): Unit = { def waitForQuiescence(cease: Bool): Bool = { // don't report cease until signal is stable for longer than any pipeline depth val count = RegInit(0.U(log2Ceil(quiescenceCycles + 1).W)) val saturated = count >= quiescenceCycles.U 
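// The counter below clears whenever `cease` deasserts and stops incrementing once it
// reaches quiescenceCycles, so the cease line is only reported after `cease` has been
// held continuously for that long (with the default quiescenceCycles = 8, the counter
// is log2Ceil(9) = 4 bits wide and saturates at 8).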
when (!cease) { count := 0.U } when (cease && !saturated) { count := count + 1.U } saturated } val (cease, _) = ceaseNode.out(0) cease(0) := could_cease.map{ c => val cease = (waitForQuiescence(c)) // Test-Only Code -- val prev_cease = RegNext(cease, false.B) assert(!(prev_cease & !cease), "CEASE line can not glitch once raised") cease }.getOrElse(false.B) } // Report when the tile is waiting for an interrupt val wfiNode = IntSourceNode(IntSourcePortSimple()) def reportWFI(could_wfi: Option[Bool]): Unit = { val (wfi, _) = wfiNode.out(0) wfi(0) := could_wfi.map(RegNext(_, init=false.B)).getOrElse(false.B) } } File Protocol.scala: package rerocc.bus import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tile._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util._ import rerocc.client.{ReRoCCClientParams} import rerocc.manager.{ReRoCCManagerParams} object ReRoCCProtocol { val width = 3 val mAcquire = 0.U(width.W) // beat0: data = inst // beat1: data = mstatus[63:0] // beat2: data = mstatus[127:64] val mInst = 1.U(width.W) // beat0: data = mstatus[63:0] // beat1: data = mstatus[127:0] val mUStatus = 2.U(width.W) // beat0: data = ptbr val mUPtbr = 3.U(width.W) val mRelease = 4.U(width.W) val mUnbusy = 5.U(width.W) // data // data = acquired val sAcqResp = 0.U(width.W) // data = 0 val sInstAck = 1.U(width.W) // beat0: data = data // beat1: data = rd val sWrite = 2.U(width.W) val sRelResp = 3.U(width.W) val sUnbusyAck = 4.U(width.W) val MAX_BEATS = 3 } class ReRoCCMsgBundle(val params: ReRoCCBundleParams) extends Bundle { val opcode = UInt(ReRoCCProtocol.width.W) val client_id = UInt(params.clientIdBits.W) val manager_id = UInt(params.managerIdBits.W) val data = UInt(64.W) } object ReRoCCMsgFirstLast { def apply(m: DecoupledIO[ReRoCCMsgBundle], isReq: Boolean): (Bool, Bool, UInt) = { val beat = RegInit(0.U(log2Ceil(ReRoCCProtocol.MAX_BEATS).W)) val max_beat = RegInit(0.U(log2Ceil(ReRoCCProtocol.MAX_BEATS).W)) val first = beat === 0.U val last = Wire(Bool()) val inst = m.bits.data.asTypeOf(new RoCCInstruction) when (m.fire && first) { max_beat := 0.U if (isReq) { when (m.bits.opcode === ReRoCCProtocol.mInst) { max_beat := inst.xs1 +& inst.xs2 } .elsewhen (m.bits.opcode === ReRoCCProtocol.mUStatus) { max_beat := 1.U } } else { when (m.bits.opcode === ReRoCCProtocol.sWrite) { max_beat := 1.U } } } last := true.B if (isReq) { when (m.bits.opcode === ReRoCCProtocol.mUStatus) { last := beat === max_beat && !first } .elsewhen (m.bits.opcode === ReRoCCProtocol.mInst) { last := Mux(first, !inst.xs1 && !inst.xs2, beat === max_beat) } } else { when (m.bits.opcode === ReRoCCProtocol.sWrite) { last := beat === max_beat && !first } } when (m.fire) { beat := beat + 1.U } when (m.fire && last) { max_beat := 0.U beat := 0.U } (first, last, beat) } } class ReRoCCBundle(val params: ReRoCCBundleParams) extends Bundle { val req = Decoupled(new ReRoCCMsgBundle(params)) val resp = Flipped(Decoupled(new ReRoCCMsgBundle(params))) } case class EmptyParams() object ReRoCCImp extends SimpleNodeImp[ReRoCCClientPortParams, ReRoCCManagerPortParams, ReRoCCEdgeParams, ReRoCCBundle] { def edge(pd: ReRoCCClientPortParams, pu: ReRoCCManagerPortParams, p: Parameters, sourceInfo: SourceInfo) = { ReRoCCEdgeParams(pu, pd) } def bundle(e: ReRoCCEdgeParams) = new ReRoCCBundle(e.bundle) def render(ei: ReRoCCEdgeParams) = RenderedEdge(colour = "#000000" /* black */) } case class 
ReRoCCClientNode(clientParams: ReRoCCClientParams)(implicit valName: ValName) extends SourceNode(ReRoCCImp)(Seq(ReRoCCClientPortParams(Seq(clientParams)))) case class ReRoCCManagerNode(managerParams: ReRoCCManagerParams)(implicit valName: ValName) extends SinkNode(ReRoCCImp)(Seq(ReRoCCManagerPortParams(Seq(managerParams)))) class ReRoCCBuffer(b: BufferParams = BufferParams.default)(implicit p: Parameters) extends LazyModule { val node = new AdapterNode(ReRoCCImp)({s => s}, {s => s}) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, _), (out, _)) => out.req <> b(in.req) in.resp <> b(out.resp) } } } object ReRoCCBuffer { def apply(b: BufferParams = BufferParams.default)(implicit p: Parameters) = { val rerocc_buffer = LazyModule(new ReRoCCBuffer(b)(p)) rerocc_buffer.node } } case class ReRoCCIdentityNode()(implicit valName: ValName) extends IdentityNode(ReRoCCImp)() File WidthWidget.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.AddressSet import freechips.rocketchip.util.{Repeater, UIntToOH1} // innBeatBytes => the new client-facing bus width class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule { private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes val node = new TLAdapterNode( clientFn = { case c => c }, managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){ override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired) } override lazy val desiredName = s"TLWidthWidget$innerBeatBytes" lazy val module = new Impl class Impl extends LazyModuleImp(this) { def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = { val inBytes = edgeIn.manager.beatBytes val outBytes = edgeOut.manager.beatBytes val ratio = outBytes / inBytes val keepBits = log2Ceil(outBytes) val dropBits = log2Ceil(inBytes) val countBits = log2Ceil(ratio) val size = edgeIn.size(in.bits) val hasData = edgeIn.hasData(in.bits) val limit = UIntToOH1(size, keepBits) >> dropBits val count = RegInit(0.U(countBits.W)) val first = count === 0.U val last = count === limit || !hasData val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR } val corrupt_reg = RegInit(false.B) val corrupt_in = edgeIn.corrupt(in.bits) val corrupt_out = corrupt_in || corrupt_reg when (in.fire) { count := count + 1.U corrupt_reg := corrupt_out when (last) { count := 0.U corrupt_reg := false.B } } def helper(idata: UInt): UInt = { // rdata is X until the first time a multi-beat write occurs. // Prevent the X from leaking outside by jamming the mux control until // the first time rdata is written (and hence no longer X). 
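// Sketch of what follows: with ratio = outBytes / inBytes input beats per output beat
// (e.g. merging 4-byte client beats into a 16-byte manager gives ratio = 4 and a 2-bit
// beat counter), each incoming beat is steered by `enable` into its lane of the `rdata`
// holding registers and the final beat is concatenated with them via Cat(mdata.reverse);
// or-ing !rdata_written_once into the mux selects keeps every lane on the live input
// until `rdata` has been written at least once, so its initial X never reaches the output.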
val rdata_written_once = RegInit(false.B) val masked_enable = enable.map(_ || !rdata_written_once) val odata = Seq.fill(ratio) { WireInit(idata) } val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata))) val pdata = rdata :+ idata val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) } when (in.fire && !last) { rdata_written_once := true.B (rdata zip mdata) foreach { case (r, m) => r := m } } Cat(mdata.reverse) } in.ready := out.ready || !last out.valid := in.valid && last out.bits := in.bits // Don't put down hardware if we never carry data edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits))) edgeOut.corrupt(out.bits) := corrupt_out (out.bits, in.bits) match { case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W)) case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W)) case (o: TLBundleC, i: TLBundleC) => () case (o: TLBundleD, i: TLBundleD) => () case _ => require(false, "Impossible bundle combination in WidthWidget") } } def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = { val inBytes = edgeIn.manager.beatBytes val outBytes = edgeOut.manager.beatBytes val ratio = inBytes / outBytes val keepBits = log2Ceil(inBytes) val dropBits = log2Ceil(outBytes) val countBits = log2Ceil(ratio) val size = edgeIn.size(in.bits) val hasData = edgeIn.hasData(in.bits) val limit = UIntToOH1(size, keepBits) >> dropBits val count = RegInit(0.U(countBits.W)) val first = count === 0.U val last = count === limit || !hasData when (out.fire) { count := count + 1.U when (last) { count := 0.U } } // For sub-beat transfer, extract which part matters val sel = in.bits match { case a: TLBundleA => a.address(keepBits-1, dropBits) case b: TLBundleB => b.address(keepBits-1, dropBits) case c: TLBundleC => c.address(keepBits-1, dropBits) case d: TLBundleD => { val sel = sourceMap(d.source) val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway } } val index = sel | count def helper(idata: UInt, width: Int): UInt = { val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) } mux(index) } out.bits := in.bits out.valid := in.valid in.ready := out.ready // Don't put down hardware if we never carry data edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8)) (out.bits, in.bits) match { case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1) case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1) case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok case (o: TLBundleD, i: TLBundleD) => () case _ => require(false, "Impossbile bundle combination in WidthWidget") } // Repeat the input if we're not last !last } def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = { if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) { // nothing to do; pass it through out.bits := in.bits out.valid := in.valid in.ready := out.ready } else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) { // split input to output val repeat = Wire(Bool()) val repeated = Repeater(in, repeat) val cated = 
Wire(chiselTypeOf(repeated)) cated <> repeated edgeIn.data(cated.bits) := Cat( edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8), edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0)) repeat := split(edgeIn, cated, edgeOut, out, sourceMap) } else { // merge input to output merge(edgeIn, in, edgeOut, out) } } (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => // If the master is narrower than the slave, the D channel must be narrowed. // This is tricky, because the D channel has no address data. // Thus, you don't know which part of a sub-beat transfer to extract. // To fix this, we record the relevant address bits for all sources. // The assumption is that this sort of situation happens only where // you connect a narrow master to the system bus, so there are few sources. def sourceMap(source_bits: UInt) = { val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes) val keepBits = log2Ceil(edgeOut.manager.beatBytes) val dropBits = log2Ceil(edgeIn.manager.beatBytes) val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W))) val a_sel = in.a.bits.address(keepBits-1, dropBits) when (in.a.fire) { if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning sources(0) := a_sel } else { sources(in.a.bits.source) := a_sel } } // depopulate unused source registers: edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U } val bypass = in.a.valid && in.a.bits.source === source if (edgeIn.manager.minLatency > 0) sources(source) else Mux(bypass, a_sel, sources(source)) } splice(edgeIn, in.a, edgeOut, out.a, sourceMap) splice(edgeOut, out.d, edgeIn, in.d, sourceMap) if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) { splice(edgeOut, out.b, edgeIn, in.b, sourceMap) splice(edgeIn, in.c, edgeOut, out.c, sourceMap) out.e.valid := in.e.valid out.e.bits := in.e.bits in.e.ready := out.e.ready } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLWidthWidget { def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode = { val widget = LazyModule(new TLWidthWidget(innerBeatBytes)) widget.node } def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes) } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val fuzz = LazyModule(new TLFuzzer(txns)) val model = LazyModule(new TLRAMModel("WidthWidget")) val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff))) (ram.node := TLDelayer(0.1) := TLFragmenter(4, 256) := TLWidthWidget(second) := TLWidthWidget(first) := TLDelayer(0.1) := model.node := fuzz.node) lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished } } class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module) dut.io.start := DontCare io.finished := dut.io.finished } File Frontend.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. 
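// Informal overview of the frontend below: fetch runs as a three-stage pipeline —
// S0 sends io.cpu.npc to the I$, S1 translates the PC through the ITLB while the cache
// access is in flight, and S2 enqueues the fetched instructions (plus BTB and
// TLB/exception metadata) into a 5-entry ShiftQueue drained through io.cpu.resp;
// BTB/BHT/RAS prediction steers predicted_npc and can redirect the fetch stream.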
package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.bundlebridge._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.tile.{CoreBundle, BaseTile} import freechips.rocketchip.tilelink.{TLWidthWidget, TLEdgeOut} import freechips.rocketchip.util.{ClockGate, ShiftQueue, property} import freechips.rocketchip.util.UIntToAugmentedUInt class FrontendReq(implicit p: Parameters) extends CoreBundle()(p) { val pc = UInt(vaddrBitsExtended.W) val speculative = Bool() } class FrontendExceptions extends Bundle { val pf = new Bundle { val inst = Bool() } val gf = new Bundle { val inst = Bool() } val ae = new Bundle { val inst = Bool() } } class FrontendResp(implicit p: Parameters) extends CoreBundle()(p) { val btb = new BTBResp val pc = UInt(vaddrBitsExtended.W) // ID stage PC val data = UInt((fetchWidth * coreInstBits).W) val mask = Bits(fetchWidth.W) val xcpt = new FrontendExceptions val replay = Bool() } class FrontendPerfEvents extends Bundle { val acquire = Bool() val tlbMiss = Bool() } class FrontendIO(implicit p: Parameters) extends CoreBundle()(p) { val might_request = Output(Bool()) val clock_enabled = Input(Bool()) val req = Valid(new FrontendReq) val sfence = Valid(new SFenceReq) val resp = Flipped(Decoupled(new FrontendResp)) val gpa = Flipped(Valid(UInt(vaddrBitsExtended.W))) val gpa_is_pte = Input(Bool()) val btb_update = Valid(new BTBUpdate) val bht_update = Valid(new BHTUpdate) val ras_update = Valid(new RASUpdate) val flush_icache = Output(Bool()) val npc = Input(UInt(vaddrBitsExtended.W)) val perf = Input(new FrontendPerfEvents()) val progress = Output(Bool()) } class Frontend(val icacheParams: ICacheParams, tileId: Int)(implicit p: Parameters) extends LazyModule { lazy val module = new FrontendModule(this) val icache = LazyModule(new ICache(icacheParams, tileId)) val masterNode = icache.masterNode val slaveNode = icache.slaveNode val resetVectorSinkNode = BundleBridgeSink[UInt](Some(() => UInt(masterNode.edges.out.head.bundle.addressBits.W))) } class FrontendBundle(val outer: Frontend) extends CoreBundle()(outer.p) { val cpu = Flipped(new FrontendIO()) val ptw = new TLBPTWIO() val errors = new ICacheErrors } class FrontendModule(outer: Frontend) extends LazyModuleImp(outer) with HasRocketCoreParameters with HasL1ICacheParameters { val io = IO(new FrontendBundle(outer)) val io_reset_vector = outer.resetVectorSinkNode.bundle implicit val edge: TLEdgeOut = outer.masterNode.edges.out(0) val icache = outer.icache.module require(fetchWidth*coreInstBytes == outer.icacheParams.fetchBytes) val fq = withReset(reset.asBool || io.cpu.req.valid) { Module(new ShiftQueue(new FrontendResp, 5, flow = true)) } val clock_en_reg = Reg(Bool()) val clock_en = clock_en_reg || io.cpu.might_request io.cpu.clock_enabled := clock_en assert(!(io.cpu.req.valid || io.cpu.sfence.valid || io.cpu.flush_icache || io.cpu.bht_update.valid || io.cpu.btb_update.valid) || io.cpu.might_request) val gated_clock = if (!rocketParams.clockGate) clock else ClockGate(clock, clock_en, "icache_clock_gate") icache.clock := gated_clock icache.io.clock_enabled := clock_en withClock (gated_clock) { // entering gated-clock domain val tlb = Module(new TLB(true, log2Ceil(fetchBytes), TLBConfig(nTLBSets, nTLBWays, outer.icacheParams.nTLBBasePageSectors, outer.icacheParams.nTLBSuperpages))) val s1_valid = Reg(Bool()) val s2_valid = RegInit(false.B) val s0_fq_has_space = 
!fq.io.mask(fq.io.mask.getWidth-3) || (!fq.io.mask(fq.io.mask.getWidth-2) && (!s1_valid || !s2_valid)) || (!fq.io.mask(fq.io.mask.getWidth-1) && (!s1_valid && !s2_valid)) val s0_valid = io.cpu.req.valid || s0_fq_has_space s1_valid := s0_valid val s1_pc = Reg(UInt(vaddrBitsExtended.W)) val s1_speculative = Reg(Bool()) val s2_pc = RegInit(t = UInt(vaddrBitsExtended.W), alignPC(io_reset_vector)) val s2_btb_resp_valid = if (usingBTB) Reg(Bool()) else false.B val s2_btb_resp_bits = Reg(new BTBResp) val s2_btb_taken = s2_btb_resp_valid && s2_btb_resp_bits.taken val s2_tlb_resp = Reg(tlb.io.resp.cloneType) val s2_xcpt = s2_tlb_resp.ae.inst || s2_tlb_resp.pf.inst || s2_tlb_resp.gf.inst val s2_speculative = RegInit(false.B) val s2_partial_insn_valid = RegInit(false.B) val s2_partial_insn = Reg(UInt(coreInstBits.W)) val wrong_path = RegInit(false.B) val s1_base_pc = ~(~s1_pc | (fetchBytes - 1).U) val ntpc = s1_base_pc + fetchBytes.U val predicted_npc = WireDefault(ntpc) val predicted_taken = WireDefault(false.B) val s2_replay = Wire(Bool()) s2_replay := (s2_valid && !fq.io.enq.fire) || RegNext(s2_replay && !s0_valid, true.B) val npc = Mux(s2_replay, s2_pc, predicted_npc) s1_pc := io.cpu.npc // consider RVC fetches across blocks to be non-speculative if the first // part was non-speculative val s0_speculative = if (usingCompressed) s1_speculative || s2_valid && !s2_speculative || predicted_taken else true.B s1_speculative := Mux(io.cpu.req.valid, io.cpu.req.bits.speculative, Mux(s2_replay, s2_speculative, s0_speculative)) val s2_redirect = WireDefault(io.cpu.req.valid) s2_valid := false.B when (!s2_replay) { s2_valid := !s2_redirect s2_pc := s1_pc s2_speculative := s1_speculative s2_tlb_resp := tlb.io.resp } val recent_progress_counter_init = 3.U val recent_progress_counter = RegInit(recent_progress_counter_init) val recent_progress = recent_progress_counter > 0.U when(io.ptw.req.fire && recent_progress) { recent_progress_counter := recent_progress_counter - 1.U } when(io.cpu.progress) { recent_progress_counter := recent_progress_counter_init } val s2_kill_speculative_tlb_refill = s2_speculative && !recent_progress io.ptw <> tlb.io.ptw tlb.io.req.valid := s1_valid && !s2_replay tlb.io.req.bits.cmd := M_XRD // Frontend only reads tlb.io.req.bits.vaddr := s1_pc tlb.io.req.bits.passthrough := false.B tlb.io.req.bits.size := log2Ceil(coreInstBytes*fetchWidth).U tlb.io.req.bits.prv := io.ptw.status.prv tlb.io.req.bits.v := io.ptw.status.v tlb.io.sfence := io.cpu.sfence tlb.io.kill := !s2_valid || s2_kill_speculative_tlb_refill icache.io.req.valid := s0_valid icache.io.req.bits.addr := io.cpu.npc icache.io.invalidate := io.cpu.flush_icache icache.io.s1_paddr := tlb.io.resp.paddr icache.io.s2_vaddr := s2_pc icache.io.s1_kill := s2_redirect || tlb.io.resp.miss || s2_replay val s2_can_speculatively_refill = s2_tlb_resp.cacheable && !io.ptw.customCSRs.asInstanceOf[RocketCustomCSRs].disableSpeculativeICacheRefill icache.io.s2_kill := s2_speculative && !s2_can_speculatively_refill || s2_xcpt icache.io.s2_cacheable := s2_tlb_resp.cacheable icache.io.s2_prefetch := s2_tlb_resp.prefetchable && !io.ptw.customCSRs.asInstanceOf[RocketCustomCSRs].disableICachePrefetch fq.io.enq.valid := RegNext(s1_valid) && s2_valid && (icache.io.resp.valid || (s2_kill_speculative_tlb_refill && s2_tlb_resp.miss) || (!s2_tlb_resp.miss && icache.io.s2_kill)) fq.io.enq.bits.pc := s2_pc io.cpu.npc := alignPC(Mux(io.cpu.req.valid, io.cpu.req.bits.pc, npc)) fq.io.enq.bits.data := icache.io.resp.bits.data fq.io.enq.bits.mask := ((1 << 
fetchWidth)-1).U << s2_pc.extract(log2Ceil(fetchWidth)+log2Ceil(coreInstBytes)-1, log2Ceil(coreInstBytes)) fq.io.enq.bits.replay := (icache.io.resp.bits.replay || icache.io.s2_kill && !icache.io.resp.valid && !s2_xcpt) || (s2_kill_speculative_tlb_refill && s2_tlb_resp.miss) fq.io.enq.bits.btb := s2_btb_resp_bits fq.io.enq.bits.btb.taken := s2_btb_taken fq.io.enq.bits.xcpt := s2_tlb_resp assert(!(s2_speculative && io.ptw.customCSRs.asInstanceOf[RocketCustomCSRs].disableSpeculativeICacheRefill && !icache.io.s2_kill)) when (icache.io.resp.valid && icache.io.resp.bits.ae) { fq.io.enq.bits.xcpt.ae.inst := true.B } if (usingBTB) { val btb = Module(new BTB) btb.io.flush := false.B btb.io.req.valid := false.B btb.io.req.bits.addr := s1_pc btb.io.btb_update := io.cpu.btb_update btb.io.bht_update := io.cpu.bht_update btb.io.ras_update.valid := false.B btb.io.ras_update.bits := DontCare btb.io.bht_advance.valid := false.B btb.io.bht_advance.bits := DontCare when (!s2_replay) { btb.io.req.valid := !s2_redirect s2_btb_resp_valid := btb.io.resp.valid s2_btb_resp_bits := btb.io.resp.bits } when (btb.io.resp.valid && btb.io.resp.bits.taken) { predicted_npc := btb.io.resp.bits.target.sextTo(vaddrBitsExtended) predicted_taken := true.B } val force_taken = io.ptw.customCSRs.bpmStatic when (io.ptw.customCSRs.flushBTB) { btb.io.flush := true.B } when (force_taken) { btb.io.bht_update.valid := false.B } val s2_base_pc = ~(~s2_pc | (fetchBytes-1).U) val taken_idx = Wire(UInt()) val after_idx = Wire(UInt()) val useRAS = WireDefault(false.B) val updateBTB = WireDefault(false.B) // If !prevTaken, ras_update / bht_update is always invalid. taken_idx := DontCare after_idx := DontCare def scanInsns(idx: Int, prevValid: Bool, prevBits: UInt, prevTaken: Bool): Bool = { def insnIsRVC(bits: UInt) = bits(1,0) =/= 3.U val prevRVI = prevValid && !insnIsRVC(prevBits) val valid = fq.io.enq.bits.mask(idx) && !prevRVI val bits = fq.io.enq.bits.data(coreInstBits*(idx+1)-1, coreInstBits*idx) val rvc = insnIsRVC(bits) val rviBits = Cat(bits, prevBits) val rviBranch = rviBits(6,0) === Instructions.BEQ.value.U.extract(6,0) val rviJump = rviBits(6,0) === Instructions.JAL.value.U.extract(6,0) val rviJALR = rviBits(6,0) === Instructions.JALR.value.U.extract(6,0) val rviReturn = rviJALR && !rviBits(7) && BitPat("b00?01") === rviBits(19,15) val rviCall = (rviJALR || rviJump) && rviBits(7) val rvcBranch = bits === Instructions.C_BEQZ || bits === Instructions.C_BNEZ val rvcJAL = (xLen == 32).B && bits === Instructions32.C_JAL val rvcJump = bits === Instructions.C_J || rvcJAL val rvcImm = Mux(bits(14), new RVCDecoder(bits, xLen, fLen).bImm.asSInt, new RVCDecoder(bits, xLen, fLen).jImm.asSInt) val rvcJR = bits === Instructions.C_MV && bits(6,2) === 0.U val rvcReturn = rvcJR && BitPat("b00?01") === bits(11,7) val rvcJALR = bits === Instructions.C_ADD && bits(6,2) === 0.U val rvcCall = rvcJAL || rvcJALR val rviImm = Mux(rviBits(3), ImmGen(IMM_UJ, rviBits), ImmGen(IMM_SB, rviBits)) val predict_taken = s2_btb_resp_bits.bht.taken || force_taken val taken = prevRVI && (rviJump || rviJALR || rviBranch && predict_taken) || valid && (rvcJump || rvcJALR || rvcJR || rvcBranch && predict_taken) val predictReturn = btb.io.ras_head.valid && (prevRVI && rviReturn || valid && rvcReturn) val predictJump = prevRVI && rviJump || valid && rvcJump val predictBranch = predict_taken && (prevRVI && rviBranch || valid && rvcBranch) when (s2_valid && s2_btb_resp_valid && s2_btb_resp_bits.bridx === idx.U && valid && !rvc) { // The BTB has predicted that the middle of 
an RVI instruction is // a branch! Flush the BTB and the pipeline. btb.io.flush := true.B fq.io.enq.bits.replay := true.B wrong_path := true.B ccover(wrong_path, "BTB_NON_CFI_ON_WRONG_PATH", "BTB predicted a non-branch was taken while on the wrong path") } when (!prevTaken) { taken_idx := idx.U after_idx := (idx + 1).U btb.io.ras_update.valid := fq.io.enq.fire && !wrong_path && (prevRVI && (rviCall || rviReturn) || valid && (rvcCall || rvcReturn)) btb.io.ras_update.bits.cfiType := Mux(Mux(prevRVI, rviReturn, rvcReturn), CFIType.ret, Mux(Mux(prevRVI, rviCall, rvcCall), CFIType.call, Mux(Mux(prevRVI, rviBranch, rvcBranch) && !force_taken, CFIType.branch, CFIType.jump))) when (!s2_btb_taken) { when (fq.io.enq.fire && taken && !predictBranch && !predictJump && !predictReturn) { wrong_path := true.B } when (s2_valid && predictReturn) { useRAS := true.B } when (s2_valid && (predictBranch || predictJump)) { val pc = s2_base_pc | (idx*coreInstBytes).U val npc = if (idx == 0) pc.asSInt + Mux(prevRVI, rviImm -& 2.S, rvcImm) else Mux(prevRVI, pc - coreInstBytes.U, pc).asSInt + Mux(prevRVI, rviImm, rvcImm) predicted_npc := npc.asUInt } } when (prevRVI && rviBranch || valid && rvcBranch) { btb.io.bht_advance.valid := fq.io.enq.fire && !wrong_path btb.io.bht_advance.bits := s2_btb_resp_bits } when (!s2_btb_resp_valid && (predictBranch && s2_btb_resp_bits.bht.strongly_taken || predictJump || predictReturn)) { updateBTB := true.B } } if (idx == fetchWidth-1) { when (fq.io.enq.fire) { s2_partial_insn_valid := false.B when (valid && !prevTaken && !rvc) { s2_partial_insn_valid := true.B s2_partial_insn := bits | 0x3.U } } prevTaken || taken } else { scanInsns(idx + 1, valid, bits, prevTaken || taken) } } when (!io.cpu.btb_update.valid) { val fetch_bubble_likely = !fq.io.mask(1) btb.io.btb_update.valid := fq.io.enq.fire && !wrong_path && fetch_bubble_likely && updateBTB btb.io.btb_update.bits.prediction.entry := tileParams.btb.get.nEntries.U btb.io.btb_update.bits.isValid := true.B btb.io.btb_update.bits.cfiType := btb.io.ras_update.bits.cfiType btb.io.btb_update.bits.br_pc := s2_base_pc | (taken_idx << log2Ceil(coreInstBytes)) btb.io.btb_update.bits.pc := s2_base_pc } btb.io.ras_update.bits.returnAddr := s2_base_pc + (after_idx << log2Ceil(coreInstBytes)) val taken = scanInsns(0, s2_partial_insn_valid, s2_partial_insn, false.B) when (useRAS) { predicted_npc := btb.io.ras_head.bits } when (fq.io.enq.fire && (s2_btb_taken || taken)) { s2_partial_insn_valid := false.B } when (!s2_btb_taken) { when (taken) { fq.io.enq.bits.btb.bridx := taken_idx fq.io.enq.bits.btb.taken := true.B fq.io.enq.bits.btb.entry := tileParams.btb.get.nEntries.U when (fq.io.enq.fire) { s2_redirect := true.B } } } assert(!s2_partial_insn_valid || fq.io.enq.bits.mask(0)) when (s2_redirect) { s2_partial_insn_valid := false.B } when (io.cpu.req.valid) { wrong_path := false.B } } io.cpu.resp <> fq.io.deq // supply guest physical address to commit stage val gpa_valid = Reg(Bool()) val gpa = Reg(UInt(vaddrBitsExtended.W)) val gpa_is_pte = Reg(Bool()) when (fq.io.enq.fire && s2_tlb_resp.gf.inst) { when (!gpa_valid) { gpa := s2_tlb_resp.gpa gpa_is_pte := s2_tlb_resp.gpa_is_pte } gpa_valid := true.B } when (io.cpu.req.valid) { gpa_valid := false.B } io.cpu.gpa.valid := gpa_valid io.cpu.gpa.bits := gpa io.cpu.gpa_is_pte := gpa_is_pte // performance events io.cpu.perf.acquire := icache.io.perf.acquire io.cpu.perf.tlbMiss := io.ptw.req.fire io.errors := icache.io.errors // gate the clock clock_en_reg := !rocketParams.clockGate.B || 
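// keep the gated I$/frontend clock enabled whenever clock gating is disabled, or
// whenever any of the conditions on the following lines indicate work still in flight: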
io.cpu.might_request || // chicken bit icache.io.keep_clock_enabled || // I$ miss or ITIM access s1_valid || s2_valid || // some fetch in flight !tlb.io.req.ready || // handling TLB miss !fq.io.mask(fq.io.mask.getWidth-1) // queue not full } // leaving gated-clock domain def alignPC(pc: UInt) = ~(~pc | (coreInstBytes - 1).U) def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = property.cover(cond, s"FRONTEND_$label", "Rocket;;" + desc) } /** Mix-ins for constructing tiles that have an ICache-based pipeline frontend */ trait HasICacheFrontend extends CanHavePTW { this: BaseTile => val module: HasICacheFrontendModule val frontend = LazyModule(new Frontend(tileParams.icache.get, tileId)) tlMasterXbar.node := TLWidthWidget(tileParams.icache.get.rowBits/8) := frontend.masterNode connectTLSlave(frontend.slaveNode, tileParams.core.fetchBytes) frontend.icache.hartIdSinkNodeOpt.foreach { _ := hartIdNexusNode } frontend.icache.mmioAddressPrefixSinkNodeOpt.foreach { _ := mmioAddressPrefixNexusNode } frontend.resetVectorSinkNode := resetVectorNexusNode nPTWPorts += 1 // This should be a None in the case of not having an ITIM address, when we // don't actually use the device that is instantiated in the frontend. private val deviceOpt = if (tileParams.icache.get.itimAddr.isDefined) Some(frontend.icache.device) else None } trait HasICacheFrontendModule extends CanHavePTWModule { val outer: HasICacheFrontend ptwPorts += outer.frontend.module.io.ptw } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
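// Roughly: instantiate (or clone) every child LazyModule and collect its Dangles, add
// the Dangles produced by this module's own diplomatic nodes, connect any pair of
// Dangles that share a source, and expose whatever remains unconnected as fields of the
// generated `auto` IO bundle passed up to the parent.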
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File PTW.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.rocket import chisel3._ import chisel3.util.{Arbiter, Cat, Decoupled, Enum, Mux1H, OHToUInt, PopCount, PriorityEncoder, PriorityEncoderOH, RegEnable, UIntToOH, Valid, is, isPow2, log2Ceil, switch} import chisel3.withClock import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.subsystem.CacheBlockBytes import freechips.rocketchip.tile._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property import scala.collection.mutable.ListBuffer /** PTE request from TLB to PTW * * TLB send a PTE request to PTW when L1TLB miss */ class PTWReq(implicit p: Parameters) extends CoreBundle()(p) { val addr = UInt(vpnBits.W) val need_gpa = Bool() val vstage1 = Bool() val stage2 = Bool() } /** PTE info from L2TLB to TLB * * containing: target PTE, exceptions, two-satge tanslation info */ class PTWResp(implicit p: Parameters) extends CoreBundle()(p) { /** ptw access exception */ val ae_ptw = Bool() /** final access exception */ val ae_final = Bool() /** page fault */ val pf = Bool() /** guest page fault */ val gf = Bool() /** hypervisor read */ val hr = Bool() /** hypervisor write */ val hw = Bool() /** hypervisor execute */ val hx = Bool() /** PTE to refill L1TLB * * source: L2TLB */ val pte = new PTE /** pte pglevel */ val level = UInt(log2Ceil(pgLevels).W) /** fragmented_superpage support */ val fragmented_superpage = Bool() /** homogeneous for both pma and pmp */ val homogeneous = Bool() val gpa = Valid(UInt(vaddrBits.W)) val gpa_is_pte = Bool() } /** IO between TLB and PTW * * PTW receives : * - PTE request * - CSRs info * - pmp results from PMP(in TLB) */ class TLBPTWIO(implicit p: Parameters) extends CoreBundle()(p) with HasCoreParameters { val req = Decoupled(Valid(new PTWReq)) val resp = Flipped(Valid(new PTWResp)) val ptbr = Input(new PTBR()) val hgatp = Input(new PTBR()) val vsatp = Input(new PTBR()) val status = Input(new MStatus()) val hstatus = Input(new HStatus()) val gstatus = Input(new MStatus()) val pmp = Input(Vec(nPMPs, new PMP)) val customCSRs = Flipped(coreParams.customCSRs) } /** PTW performance statistics */ class PTWPerfEvents extends Bundle { val l2miss = Bool() val l2hit = Bool() val pte_miss = Bool() val pte_hit = Bool() } /** Datapath IO between PTW 
and Core * * PTW receives CSRs info, pmp checks, sfence instruction info * * PTW sends its performance statistics to core */ class DatapathPTWIO(implicit p: Parameters) extends CoreBundle()(p) with HasCoreParameters { val ptbr = Input(new PTBR()) val hgatp = Input(new PTBR()) val vsatp = Input(new PTBR()) val sfence = Flipped(Valid(new SFenceReq)) val status = Input(new MStatus()) val hstatus = Input(new HStatus()) val gstatus = Input(new MStatus()) val pmp = Input(Vec(nPMPs, new PMP)) val perf = Output(new PTWPerfEvents()) val customCSRs = Flipped(coreParams.customCSRs) /** enable clock generated by ptw */ val clock_enabled = Output(Bool()) } /** PTE template for transmission * * contains useful methods to check PTE attributes * @see RV-priv spec 4.3.1 for pgae table entry format */ class PTE(implicit p: Parameters) extends CoreBundle()(p) { val reserved_for_future = UInt(10.W) val ppn = UInt(44.W) val reserved_for_software = Bits(2.W) /** dirty bit */ val d = Bool() /** access bit */ val a = Bool() /** global mapping */ val g = Bool() /** user mode accessible */ val u = Bool() /** whether the page is executable */ val x = Bool() /** whether the page is writable */ val w = Bool() /** whether the page is readable */ val r = Bool() /** valid bit */ val v = Bool() /** return true if find a pointer to next level page table */ def table(dummy: Int = 0) = v && !r && !w && !x && !d && !a && !u && reserved_for_future === 0.U /** return true if find a leaf PTE */ def leaf(dummy: Int = 0) = v && (r || (x && !w)) && a /** user read */ def ur(dummy: Int = 0) = sr() && u /** user write*/ def uw(dummy: Int = 0) = sw() && u /** user execute */ def ux(dummy: Int = 0) = sx() && u /** supervisor read */ def sr(dummy: Int = 0) = leaf() && r /** supervisor write */ def sw(dummy: Int = 0) = leaf() && w && d /** supervisor execute */ def sx(dummy: Int = 0) = leaf() && x /** full permission: writable and executable in user mode */ def isFullPerm(dummy: Int = 0) = uw() && ux() } /** L2TLB PTE template * * contains tag bits * @param nSets number of sets in L2TLB * @see RV-priv spec 4.3.1 for page table entry format */ class L2TLBEntry(nSets: Int)(implicit p: Parameters) extends CoreBundle()(p) with HasCoreParameters { val idxBits = log2Ceil(nSets) val tagBits = maxSVAddrBits - pgIdxBits - idxBits + (if (usingHypervisor) 1 else 0) val tag = UInt(tagBits.W) val ppn = UInt(ppnBits.W) /** dirty bit */ val d = Bool() /** access bit */ val a = Bool() /** user mode accessible */ val u = Bool() /** whether the page is executable */ val x = Bool() /** whether the page is writable */ val w = Bool() /** whether the page is readable */ val r = Bool() } /** PTW contains L2TLB, and performs page table walk for high level TLB, and cache queries from L1 TLBs(I$, D$, RoCC) * * It performs hierarchy page table query to mem for the desired leaf PTE and cache them in l2tlb. * Besides leaf PTEs, it also caches non-leaf PTEs in pte_cache to accerlerate the process. 
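 * (Informally: with Sv39, pgLevels = 3, a walk that misses everywhere performs up to
 * three dependent memory reads, one per level; a pte_cache hit on a non-leaf PTE skips
 * that level's memory read, and an l2tlb hit returns the leaf PTE without completing
 * any memory read.)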
* * ==Structure== * - l2tlb : for leaf PTEs * - set-associative (configurable with [[CoreParams.nL2TLBEntries]]and [[CoreParams.nL2TLBWays]])) * - PLRU * - pte_cache: for non-leaf PTEs * - set-associative * - LRU * - s2_pte_cache: for non-leaf PTEs in 2-stage translation * - set-associative * - PLRU * * l2tlb Pipeline: 3 stage * {{{ * stage 0 : read * stage 1 : decode * stage 2 : hit check * }}} * ==State Machine== * s_ready: ready to reveive request from TLB * s_req: request mem; pte_cache hit judge * s_wait1: deal with l2tlb error * s_wait2: final hit judge * s_wait3: receive mem response * s_fragment_superpage: for superpage PTE * * @note l2tlb hit happens in s_req or s_wait1 * @see RV-priv spec 4.3-4.6 for Virtual-Memory System * @see RV-priv spec 8.5 for Two-Stage Address Translation * @todo details in two-stage translation */ class PTW(n: Int)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) { val io = IO(new Bundle { /** to n TLB */ val requestor = Flipped(Vec(n, new TLBPTWIO)) /** to HellaCache */ val mem = new HellaCacheIO /** to Core * * contains CSRs info and performance statistics */ val dpath = new DatapathPTWIO }) val s_ready :: s_req :: s_wait1 :: s_dummy1 :: s_wait2 :: s_wait3 :: s_dummy2 :: s_fragment_superpage :: Nil = Enum(8) val state = RegInit(s_ready) val l2_refill_wire = Wire(Bool()) /** Arbiter to arbite request from n TLB */ val arb = Module(new Arbiter(Valid(new PTWReq), n)) // use TLB req as arbitor's input arb.io.in <> io.requestor.map(_.req) // receive req only when s_ready and not in refill arb.io.out.ready := (state === s_ready) && !l2_refill_wire val resp_valid = RegNext(VecInit(Seq.fill(io.requestor.size)(false.B))) val clock_en = state =/= s_ready || l2_refill_wire || arb.io.out.valid || io.dpath.sfence.valid || io.dpath.customCSRs.disableDCacheClockGate io.dpath.clock_enabled := usingVM.B && clock_en val gated_clock = if (!usingVM || !tileParams.dcache.get.clockGate) clock else ClockGate(clock, clock_en, "ptw_clock_gate") withClock (gated_clock) { // entering gated-clock domain val invalidated = Reg(Bool()) /** current PTE level * {{{ * 0 <= count <= pgLevel-1 * count = pgLevel - 1 : leaf PTE * count < pgLevel - 1 : non-leaf PTE * }}} */ val count = Reg(UInt(log2Ceil(pgLevels).W)) val resp_ae_ptw = Reg(Bool()) val resp_ae_final = Reg(Bool()) val resp_pf = Reg(Bool()) val resp_gf = Reg(Bool()) val resp_hr = Reg(Bool()) val resp_hw = Reg(Bool()) val resp_hx = Reg(Bool()) val resp_fragmented_superpage = Reg(Bool()) /** tlb request */ val r_req = Reg(new PTWReq) /** current selected way in arbitor */ val r_req_dest = Reg(Bits()) // to respond to L1TLB : l2_hit // to construct mem.req.addr val r_pte = Reg(new PTE) val r_hgatp = Reg(new PTBR) // 2-stage pageLevel val aux_count = Reg(UInt(log2Ceil(pgLevels).W)) /** pte for 2-stage translation */ val aux_pte = Reg(new PTE) val gpa_pgoff = Reg(UInt(pgIdxBits.W)) // only valid in resp_gf case val stage2 = Reg(Bool()) val stage2_final = Reg(Bool()) val satp = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp, io.dpath.ptbr) val r_hgatp_initial_count = pgLevels.U - minPgLevels.U - r_hgatp.additionalPgLevels /** 2-stage translation both enable */ val do_both_stages = r_req.vstage1 && r_req.stage2 val max_count = count max aux_count val vpn = Mux(r_req.vstage1 && stage2, aux_pte.ppn, r_req.addr) val mem_resp_valid = RegNext(io.mem.resp.valid) val mem_resp_data = RegNext(io.mem.resp.bits.data) io.mem.uncached_resp.map { resp => assert(!(resp.valid && io.mem.resp.valid)) resp.ready := true.B when 
(resp.valid) { mem_resp_valid := true.B mem_resp_data := resp.bits.data } } // construct pte from mem.resp val (pte, invalid_paddr, invalid_gpa) = { val tmp = mem_resp_data.asTypeOf(new PTE()) val res = WireDefault(tmp) res.ppn := Mux(do_both_stages && !stage2, tmp.ppn(vpnBits.min(tmp.ppn.getWidth)-1, 0), tmp.ppn(ppnBits-1, 0)) when (tmp.r || tmp.w || tmp.x) { // for superpage mappings, make sure PPN LSBs are zero for (i <- 0 until pgLevels-1) when (count <= i.U && tmp.ppn((pgLevels-1-i)*pgLevelBits-1, (pgLevels-2-i)*pgLevelBits) =/= 0.U) { res.v := false.B } } (res, Mux(do_both_stages && !stage2, (tmp.ppn >> vpnBits) =/= 0.U, (tmp.ppn >> ppnBits) =/= 0.U), do_both_stages && !stage2 && checkInvalidHypervisorGPA(r_hgatp, tmp.ppn)) } // find non-leaf PTE, need traverse val traverse = pte.table() && !invalid_paddr && !invalid_gpa && count < (pgLevels-1).U /** address send to mem for enquerry */ val pte_addr = if (!usingVM) 0.U else { val vpn_idxs = (0 until pgLevels).map { i => val width = pgLevelBits + (if (i <= pgLevels - minPgLevels) hypervisorExtraAddrBits else 0) (vpn >> (pgLevels - i - 1) * pgLevelBits)(width - 1, 0) } val mask = Mux(stage2 && count === r_hgatp_initial_count, ((1 << (hypervisorExtraAddrBits + pgLevelBits)) - 1).U, ((1 << pgLevelBits) - 1).U) val vpn_idx = vpn_idxs(count) & mask val raw_pte_addr = ((r_pte.ppn << pgLevelBits) | vpn_idx) << log2Ceil(xLen / 8) val size = if (usingHypervisor) vaddrBits else paddrBits //use r_pte.ppn as page table base address //use vpn slice as offset raw_pte_addr.apply(size.min(raw_pte_addr.getWidth) - 1, 0) } /** stage2_pte_cache input addr */ val stage2_pte_cache_addr = if (!usingHypervisor) 0.U else { val vpn_idxs = (0 until pgLevels - 1).map { i => (r_req.addr >> (pgLevels - i - 1) * pgLevelBits)(pgLevelBits - 1, 0) } val vpn_idx = vpn_idxs(aux_count) val raw_s2_pte_cache_addr = Cat(aux_pte.ppn, vpn_idx) << log2Ceil(xLen / 8) raw_s2_pte_cache_addr(vaddrBits.min(raw_s2_pte_cache_addr.getWidth) - 1, 0) } def makeFragmentedSuperpagePPN(ppn: UInt): Seq[UInt] = { (pgLevels-1 until 0 by -1).map(i => Cat(ppn >> (pgLevelBits*i), r_req.addr(((pgLevelBits*i) min vpnBits)-1, 0).padTo(pgLevelBits*i))) } /** PTECache caches non-leaf PTE * @param s2 true: 2-stage address translation */ def makePTECache(s2: Boolean): (Bool, UInt) = if (coreParams.nPTECacheEntries == 0) { (false.B, 0.U) } else { val plru = new PseudoLRU(coreParams.nPTECacheEntries) val valid = RegInit(0.U(coreParams.nPTECacheEntries.W)) val tags = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor) 1 + vaddrBits else paddrBits).W))) // not include full pte, only ppn val data = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor && s2) vpnBits else ppnBits).W))) val can_hit = if (s2) count === r_hgatp_initial_count && aux_count < (pgLevels-1).U && r_req.vstage1 && stage2 && !stage2_final else count < (pgLevels-1).U && Mux(r_req.vstage1, stage2, !r_req.stage2) val can_refill = if (s2) do_both_stages && !stage2 && !stage2_final else can_hit val tag = if (s2) Cat(true.B, stage2_pte_cache_addr.padTo(vaddrBits)) else Cat(r_req.vstage1, pte_addr.padTo(if (usingHypervisor) vaddrBits else paddrBits)) val hits = tags.map(_ === tag).asUInt & valid val hit = hits.orR && can_hit // refill with mem response when (mem_resp_valid && traverse && can_refill && !hits.orR && !invalidated) { val r = Mux(valid.andR, plru.way, PriorityEncoder(~valid)) valid := valid | UIntToOH(r) tags(r) := tag data(r) := pte.ppn plru.access(r) } // replace when (hit && state === s_req) { 
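// a pte_cache hit during s_req touches the matching way so the pseudo-LRU
// replacement keeps recently used non-leaf PTEs resident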
plru.access(OHToUInt(hits)) } when (io.dpath.sfence.valid && (!io.dpath.sfence.bits.rs1 || usingHypervisor.B && io.dpath.sfence.bits.hg)) { valid := 0.U } val lcount = if (s2) aux_count else count for (i <- 0 until pgLevels-1) { ccover(hit && state === s_req && lcount === i.U, s"PTE_CACHE_HIT_L$i", s"PTE cache hit, level $i") } (hit, Mux1H(hits, data)) } // generate pte_cache val (pte_cache_hit, pte_cache_data) = makePTECache(false) // generate pte_cache with 2-stage translation val (stage2_pte_cache_hit, stage2_pte_cache_data) = makePTECache(true) // pte_cache hit or 2-stage pte_cache hit val pte_hit = RegNext(false.B) io.dpath.perf.pte_miss := false.B io.dpath.perf.pte_hit := pte_hit && (state === s_req) && !io.dpath.perf.l2hit assert(!(io.dpath.perf.l2hit && (io.dpath.perf.pte_miss || io.dpath.perf.pte_hit)), "PTE Cache Hit/Miss Performance Monitor Events are lower priority than L2TLB Hit event") // l2_refill happens when find the leaf pte val l2_refill = RegNext(false.B) l2_refill_wire := l2_refill io.dpath.perf.l2miss := false.B io.dpath.perf.l2hit := false.B // l2tlb val (l2_hit, l2_error, l2_pte, l2_tlb_ram) = if (coreParams.nL2TLBEntries == 0) (false.B, false.B, WireDefault(0.U.asTypeOf(new PTE)), None) else { val code = new ParityCode require(isPow2(coreParams.nL2TLBEntries)) require(isPow2(coreParams.nL2TLBWays)) require(coreParams.nL2TLBEntries >= coreParams.nL2TLBWays) val nL2TLBSets = coreParams.nL2TLBEntries / coreParams.nL2TLBWays require(isPow2(nL2TLBSets)) val idxBits = log2Ceil(nL2TLBSets) val l2_plru = new SetAssocLRU(nL2TLBSets, coreParams.nL2TLBWays, "plru") val ram = DescribedSRAM( name = "l2_tlb_ram", desc = "L2 TLB", size = nL2TLBSets, data = Vec(coreParams.nL2TLBWays, UInt(code.width(new L2TLBEntry(nL2TLBSets).getWidth).W)) ) val g = Reg(Vec(coreParams.nL2TLBWays, UInt(nL2TLBSets.W))) val valid = RegInit(VecInit(Seq.fill(coreParams.nL2TLBWays)(0.U(nL2TLBSets.W)))) // use r_req to construct tag val (r_tag, r_idx) = Split(Cat(r_req.vstage1, r_req.addr(maxSVAddrBits-pgIdxBits-1, 0)), idxBits) /** the valid vec for the selected set(including n ways) */ val r_valid_vec = valid.map(_(r_idx)).asUInt val r_valid_vec_q = Reg(UInt(coreParams.nL2TLBWays.W)) val r_l2_plru_way = Reg(UInt(log2Ceil(coreParams.nL2TLBWays max 1).W)) r_valid_vec_q := r_valid_vec // replacement way r_l2_plru_way := (if (coreParams.nL2TLBWays > 1) l2_plru.way(r_idx) else 0.U) // refill with r_pte(leaf pte) when (l2_refill && !invalidated) { val entry = Wire(new L2TLBEntry(nL2TLBSets)) entry.ppn := r_pte.ppn entry.d := r_pte.d entry.a := r_pte.a entry.u := r_pte.u entry.x := r_pte.x entry.w := r_pte.w entry.r := r_pte.r entry.tag := r_tag // if all the way are valid, use plru to select one way to be replaced, // otherwise use PriorityEncoderOH to select one val wmask = if (coreParams.nL2TLBWays > 1) Mux(r_valid_vec_q.andR, UIntToOH(r_l2_plru_way, coreParams.nL2TLBWays), PriorityEncoderOH(~r_valid_vec_q)) else 1.U(1.W) ram.write(r_idx, VecInit(Seq.fill(coreParams.nL2TLBWays)(code.encode(entry.asUInt))), wmask.asBools) val mask = UIntToOH(r_idx) for (way <- 0 until coreParams.nL2TLBWays) { when (wmask(way)) { valid(way) := valid(way) | mask g(way) := Mux(r_pte.g, g(way) | mask, g(way) & ~mask) } } } // sfence happens when (io.dpath.sfence.valid) { val hg = usingHypervisor.B && io.dpath.sfence.bits.hg for (way <- 0 until coreParams.nL2TLBWays) { valid(way) := Mux(!hg && io.dpath.sfence.bits.rs1, valid(way) & ~UIntToOH(io.dpath.sfence.bits.addr(idxBits+pgIdxBits-1, pgIdxBits)), Mux(!hg && 
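// For the invalidation Mux being built here: with rs1 set, only the L2 TLB set indexed by
// the fenced virtual address is cleared; otherwise, with rs2 set, only global entries are
// kept (valid & g); otherwise (or for HFENCE, when hg is set) the whole way is flushed.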
io.dpath.sfence.bits.rs2, valid(way) & g(way), 0.U)) } } val s0_valid = !l2_refill && arb.io.out.fire val s0_suitable = arb.io.out.bits.bits.vstage1 === arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.need_gpa val s1_valid = RegNext(s0_valid && s0_suitable && arb.io.out.bits.valid) val s2_valid = RegNext(s1_valid) // read from tlb idx val s1_rdata = ram.read(arb.io.out.bits.bits.addr(idxBits-1, 0), s0_valid) val s2_rdata = s1_rdata.map(s1_rdway => code.decode(RegEnable(s1_rdway, s1_valid))) val s2_valid_vec = RegEnable(r_valid_vec, s1_valid) val s2_g_vec = RegEnable(VecInit(g.map(_(r_idx))), s1_valid) val s2_error = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && s2_rdata(way).error).orR when (s2_valid && s2_error) { valid.foreach { _ := 0.U }} // decode val s2_entry_vec = s2_rdata.map(_.uncorrected.asTypeOf(new L2TLBEntry(nL2TLBSets))) val s2_hit_vec = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && (r_tag === s2_entry_vec(way).tag)) val s2_hit = s2_valid && s2_hit_vec.orR io.dpath.perf.l2miss := s2_valid && !(s2_hit_vec.orR) io.dpath.perf.l2hit := s2_hit when (s2_hit) { l2_plru.access(r_idx, OHToUInt(s2_hit_vec)) assert((PopCount(s2_hit_vec) === 1.U) || s2_error, "L2 TLB multi-hit") } val s2_pte = Wire(new PTE) val s2_hit_entry = Mux1H(s2_hit_vec, s2_entry_vec) s2_pte.ppn := s2_hit_entry.ppn s2_pte.d := s2_hit_entry.d s2_pte.a := s2_hit_entry.a s2_pte.g := Mux1H(s2_hit_vec, s2_g_vec) s2_pte.u := s2_hit_entry.u s2_pte.x := s2_hit_entry.x s2_pte.w := s2_hit_entry.w s2_pte.r := s2_hit_entry.r s2_pte.v := true.B s2_pte.reserved_for_future := 0.U s2_pte.reserved_for_software := 0.U for (way <- 0 until coreParams.nL2TLBWays) { ccover(s2_hit && s2_hit_vec(way), s"L2_TLB_HIT_WAY$way", s"L2 TLB hit way$way") } (s2_hit, s2_error, s2_pte, Some(ram)) } // if SFENCE occurs during walk, don't refill PTE cache or L2 TLB until next walk invalidated := io.dpath.sfence.valid || (invalidated && state =/= s_ready) // mem request io.mem.keep_clock_enabled := false.B io.mem.req.valid := state === s_req || state === s_dummy1 io.mem.req.bits.phys := true.B io.mem.req.bits.cmd := M_XRD io.mem.req.bits.size := log2Ceil(xLen/8).U io.mem.req.bits.signed := false.B io.mem.req.bits.addr := pte_addr io.mem.req.bits.idx.foreach(_ := pte_addr) io.mem.req.bits.dprv := PRV.S.U // PTW accesses are S-mode by definition io.mem.req.bits.dv := do_both_stages && !stage2 io.mem.req.bits.tag := DontCare io.mem.req.bits.no_resp := false.B io.mem.req.bits.no_alloc := DontCare io.mem.req.bits.no_xcpt := DontCare io.mem.req.bits.data := DontCare io.mem.req.bits.mask := DontCare io.mem.s1_kill := l2_hit || (state =/= s_wait1) || resp_gf io.mem.s1_data := DontCare io.mem.s2_kill := false.B val pageGranularityPMPs = pmpGranularity >= (1 << pgIdxBits) require(!usingHypervisor || pageGranularityPMPs, s"hypervisor requires pmpGranularity >= ${1<<pgIdxBits}") val pmaPgLevelHomogeneous = (0 until pgLevels) map { i => val pgSize = BigInt(1) << (pgIdxBits + ((pgLevels - 1 - i) * pgLevelBits)) if (pageGranularityPMPs && i == pgLevels - 1) { require(TLBPageLookup.homogeneous(edge.manager.managers, pgSize), s"All memory regions must be $pgSize-byte aligned") true.B } else { TLBPageLookup(edge.manager.managers, xLen, p(CacheBlockBytes), pgSize, xLen/8)(r_pte.ppn << pgIdxBits).homogeneous } } val pmaHomogeneous = pmaPgLevelHomogeneous(count) val pmpHomogeneous = new PMPHomogeneityChecker(io.dpath.pmp).apply(r_pte.ppn << pgIdxBits, count) val homogeneous = pmaHomogeneous && pmpHomogeneous // response to tlb for 
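// Every requestor port sees the same response payload in the loop below; only
// resp_valid(r_req_dest), the port that issued this walk, actually asserts valid.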
(i <- 0 until io.requestor.size) { io.requestor(i).resp.valid := resp_valid(i) io.requestor(i).resp.bits.ae_ptw := resp_ae_ptw io.requestor(i).resp.bits.ae_final := resp_ae_final io.requestor(i).resp.bits.pf := resp_pf io.requestor(i).resp.bits.gf := resp_gf io.requestor(i).resp.bits.hr := resp_hr io.requestor(i).resp.bits.hw := resp_hw io.requestor(i).resp.bits.hx := resp_hx io.requestor(i).resp.bits.pte := r_pte io.requestor(i).resp.bits.level := max_count io.requestor(i).resp.bits.homogeneous := homogeneous || pageGranularityPMPs.B io.requestor(i).resp.bits.fragmented_superpage := resp_fragmented_superpage && pageGranularityPMPs.B io.requestor(i).resp.bits.gpa.valid := r_req.need_gpa io.requestor(i).resp.bits.gpa.bits := Cat(Mux(!stage2_final || !r_req.vstage1 || aux_count === (pgLevels - 1).U, aux_pte.ppn, makeFragmentedSuperpagePPN(aux_pte.ppn)(aux_count)), gpa_pgoff) io.requestor(i).resp.bits.gpa_is_pte := !stage2_final io.requestor(i).ptbr := io.dpath.ptbr io.requestor(i).hgatp := io.dpath.hgatp io.requestor(i).vsatp := io.dpath.vsatp io.requestor(i).customCSRs <> io.dpath.customCSRs io.requestor(i).status := io.dpath.status io.requestor(i).hstatus := io.dpath.hstatus io.requestor(i).gstatus := io.dpath.gstatus io.requestor(i).pmp := io.dpath.pmp } // control state machine val next_state = WireDefault(state) state := OptimizationBarrier(next_state) val do_switch = WireDefault(false.B) switch (state) { is (s_ready) { when (arb.io.out.fire) { val satp_initial_count = pgLevels.U - minPgLevels.U - satp.additionalPgLevels val vsatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.vsatp.additionalPgLevels val hgatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.hgatp.additionalPgLevels val aux_ppn = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp.ppn, arb.io.out.bits.bits.addr) r_req := arb.io.out.bits.bits r_req_dest := arb.io.chosen next_state := Mux(arb.io.out.bits.valid, s_req, s_ready) stage2 := arb.io.out.bits.bits.stage2 stage2_final := arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.vstage1 count := Mux(arb.io.out.bits.bits.stage2, hgatp_initial_count, satp_initial_count) aux_count := Mux(arb.io.out.bits.bits.vstage1, vsatp_initial_count, 0.U) aux_pte.ppn := aux_ppn aux_pte.reserved_for_future := 0.U resp_ae_ptw := false.B resp_ae_final := false.B resp_pf := false.B resp_gf := checkInvalidHypervisorGPA(io.dpath.hgatp, aux_ppn) && arb.io.out.bits.bits.stage2 resp_hr := true.B resp_hw := true.B resp_hx := true.B resp_fragmented_superpage := false.B r_hgatp := io.dpath.hgatp assert(!arb.io.out.bits.bits.need_gpa || arb.io.out.bits.bits.stage2) } } is (s_req) { when(stage2 && count === r_hgatp_initial_count) { gpa_pgoff := Mux(aux_count === (pgLevels-1).U, r_req.addr << (xLen/8).log2, stage2_pte_cache_addr) } // pte_cache hit when (stage2_pte_cache_hit) { aux_count := aux_count + 1.U aux_pte.ppn := stage2_pte_cache_data aux_pte.reserved_for_future := 0.U pte_hit := true.B }.elsewhen (pte_cache_hit) { count := count + 1.U pte_hit := true.B }.otherwise { next_state := Mux(io.mem.req.ready, s_wait1, s_req) } when(resp_gf) { next_state := s_ready resp_valid(r_req_dest) := true.B } } is (s_wait1) { // This Mux is for the l2_error case; the l2_hit && !l2_error case is overriden below next_state := Mux(l2_hit, s_req, s_wait2) } is (s_wait2) { next_state := s_wait3 io.dpath.perf.pte_miss := count < (pgLevels-1).U when (io.mem.s2_xcpt.ae.ld) { resp_ae_ptw := true.B next_state := s_ready resp_valid(r_req_dest) := true.B } } is (s_fragment_superpage) { next_state := s_ready 
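// s_fragment_superpage: a leaf superpage was reached, but either its PMP/PMA attributes are
// not homogeneous across the whole superpage or this is a two-stage walk; respond with
// fragmented_superpage set (and, in the non-homogeneous case, count forced to the leaf
// level) so the requesting TLB only caches a base-page-sized translation.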
resp_valid(r_req_dest) := true.B when (!homogeneous) { count := (pgLevels-1).U resp_fragmented_superpage := true.B } when (do_both_stages) { resp_fragmented_superpage := true.B } } } val merged_pte = { val superpage_masks = (0 until pgLevels).map(i => ((BigInt(1) << pte.ppn.getWidth) - (BigInt(1) << (pgLevels-1-i)*pgLevelBits)).U) val superpage_mask = superpage_masks(Mux(stage2_final, max_count, (pgLevels-1).U)) val stage1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, (pgLevels-i-1)*pgLevelBits), aux_pte.ppn((pgLevels-i-1)*pgLevelBits-1,0))) :+ pte.ppn val stage1_ppn = stage1_ppns(count) makePTE(stage1_ppn & superpage_mask, aux_pte) } r_pte := OptimizationBarrier( // l2tlb hit->find a leaf PTE(l2_pte), respond to L1TLB Mux(l2_hit && !l2_error && !resp_gf, l2_pte, // S2 PTE cache hit -> proceed to the next level of walking, update the r_pte with hgatp Mux(state === s_req && stage2_pte_cache_hit, makeHypervisorRootPTE(r_hgatp, stage2_pte_cache_data, l2_pte), // pte cache hit->find a non-leaf PTE(pte_cache),continue to request mem Mux(state === s_req && pte_cache_hit, makePTE(pte_cache_data, l2_pte), // 2-stage translation Mux(do_switch, makeHypervisorRootPTE(r_hgatp, pte.ppn, r_pte), // when mem respond, store mem.resp.pte Mux(mem_resp_valid, Mux(!traverse && r_req.vstage1 && stage2, merged_pte, pte), // fragment_superpage Mux(state === s_fragment_superpage && !homogeneous && count =/= (pgLevels - 1).U, makePTE(makeFragmentedSuperpagePPN(r_pte.ppn)(count), r_pte), // when tlb request come->request mem, use root address in satp(or vsatp,hgatp) Mux(arb.io.out.fire, Mux(arb.io.out.bits.bits.stage2, makeHypervisorRootPTE(io.dpath.hgatp, io.dpath.vsatp.ppn, r_pte), makePTE(satp.ppn, r_pte)), r_pte)))))))) when (l2_hit && !l2_error && !resp_gf) { assert(state === s_req || state === s_wait1) next_state := s_ready resp_valid(r_req_dest) := true.B count := (pgLevels-1).U } when (mem_resp_valid) { assert(state === s_wait3) next_state := s_req when (traverse) { when (do_both_stages && !stage2) { do_switch := true.B } count := count + 1.U }.otherwise { val gf = (stage2 && !stage2_final && !pte.ur()) || (pte.leaf() && pte.reserved_for_future === 0.U && invalid_gpa) val ae = pte.v && invalid_paddr val pf = pte.v && pte.reserved_for_future =/= 0.U val success = pte.v && !ae && !pf && !gf when (do_both_stages && !stage2_final && success) { when (stage2) { stage2 := false.B count := aux_count }.otherwise { stage2_final := true.B do_switch := true.B } }.otherwise { // find a leaf pte, start l2 refill l2_refill := success && count === (pgLevels-1).U && !r_req.need_gpa && (!r_req.vstage1 && !r_req.stage2 || do_both_stages && aux_count === (pgLevels-1).U && pte.isFullPerm()) count := max_count when (pageGranularityPMPs.B && !(count === (pgLevels-1).U && (!do_both_stages || aux_count === (pgLevels-1).U))) { next_state := s_fragment_superpage }.otherwise { next_state := s_ready resp_valid(r_req_dest) := true.B } resp_ae_ptw := ae && count < (pgLevels-1).U && pte.table() resp_ae_final := ae && pte.leaf() resp_pf := pf && !stage2 resp_gf := gf || (pf && stage2) resp_hr := !stage2 || (!pf && !gf && pte.ur()) resp_hw := !stage2 || (!pf && !gf && pte.uw()) resp_hx := !stage2 || (!pf && !gf && pte.ux()) } } } when (io.mem.s2_nack) { assert(state === s_wait2) next_state := s_req } when (do_switch) { aux_count := Mux(traverse, count + 1.U, count) count := r_hgatp_initial_count aux_pte := Mux(traverse, pte, { val s1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, 
(pgLevels-i-1)*pgLevelBits), r_req.addr(((pgLevels-i-1)*pgLevelBits min vpnBits)-1,0).padTo((pgLevels-i-1)*pgLevelBits))) :+ pte.ppn makePTE(s1_ppns(count), pte) }) stage2 := true.B } for (i <- 0 until pgLevels) { val leaf = mem_resp_valid && !traverse && count === i.U ccover(leaf && pte.v && !invalid_paddr && !invalid_gpa && pte.reserved_for_future === 0.U, s"L$i", s"successful page-table access, level $i") ccover(leaf && pte.v && invalid_paddr, s"L${i}_BAD_PPN_MSB", s"PPN too large, level $i") ccover(leaf && pte.v && invalid_gpa, s"L${i}_BAD_GPA_MSB", s"GPA too large, level $i") ccover(leaf && pte.v && pte.reserved_for_future =/= 0.U, s"L${i}_BAD_RSV_MSB", s"reserved MSBs set, level $i") ccover(leaf && !mem_resp_data(0), s"L${i}_INVALID_PTE", s"page not present, level $i") if (i != pgLevels-1) ccover(leaf && !pte.v && mem_resp_data(0), s"L${i}_BAD_PPN_LSB", s"PPN LSBs not zero, level $i") } ccover(mem_resp_valid && count === (pgLevels-1).U && pte.table(), s"TOO_DEEP", s"page table too deep") ccover(io.mem.s2_nack, "NACK", "D$ nacked page-table access") ccover(state === s_wait2 && io.mem.s2_xcpt.ae.ld, "AE", "access exception while walking page table") } // leaving gated-clock domain private def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = if (usingVM) property.cover(cond, s"PTW_$label", "MemorySystem;;" + desc) /** Relace PTE.ppn with ppn */ private def makePTE(ppn: UInt, default: PTE) = { val pte = WireDefault(default) pte.ppn := ppn pte } /** use hgatp and vpn to construct a new ppn */ private def makeHypervisorRootPTE(hgatp: PTBR, vpn: UInt, default: PTE) = { val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> (pgLevels-i)*pgLevelBits)) val lsbs = WireDefault(UInt(maxHypervisorExtraAddrBits.W), idxs(count)) val pte = WireDefault(default) pte.ppn := Cat(hgatp.ppn >> maxHypervisorExtraAddrBits, lsbs) pte } /** use hgatp and vpn to check for gpa out of range */ private def checkInvalidHypervisorGPA(hgatp: PTBR, vpn: UInt) = { val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> ((pgLevels-i)*pgLevelBits)+maxHypervisorExtraAddrBits)) idxs.extract(count) =/= 0.U } } /** Mix-ins for constructing tiles that might have a PTW */ trait CanHavePTW extends HasTileParameters with HasHellaCache { this: BaseTile => val module: CanHavePTWModule var nPTWPorts = 1 nDCachePorts += usingPTW.toInt } trait CanHavePTWModule extends HasHellaCacheModule { val outer: CanHavePTW val ptwPorts = ListBuffer(outer.dcache.module.io.ptw) val ptw = Module(new PTW(outer.nPTWPorts)(outer.dcache.node.edges.out(0), outer.p)) ptw.io.mem <> DontCare if (outer.usingPTW) { dcachePorts += ptw.io.mem } } File Configs.scala: package rerocc import chisel3._ import org.chipsalliance.cde.config._ import freechips.rocketchip.tile.{BuildRoCC} import freechips.rocketchip.diplomacy.{LazyModule} import rerocc.client._ import rerocc.manager._ import rerocc.bus._ class WithReRoCC(clientParams: ReRoCCClientParams = ReRoCCClientParams(), reRoCCManagerParams: ReRoCCTileParams = ReRoCCTileParams()) extends Config((site, here, up) => { case BuildRoCC => Seq((p: Parameters) => { val rerocc_client = LazyModule(new ReRoCCClient(clientParams)(p)) rerocc_client }) case ReRoCCTileKey => up(BuildRoCC).map(gen => reRoCCManagerParams.copy(genRoCC=Some(gen))) }) class WithReRoCCNoC(nocParams: ReRoCCNoCParams) extends Config((site, here, up) => { case ReRoCCNoCKey => 
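// Usage sketch (illustrative, not part of this file): these fragments stack like any other
// CDE Config, e.g.
//   class MyReRoCCConfig extends Config(
//     new WithReRoCCNoC(myNoCParams) ++ new WithReRoCC() ++ new MyBaseConfig)
// where `myNoCParams` and `MyBaseConfig` are placeholders for a concrete ReRoCCNoCParams
// value and the rest of the system configuration.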
Some(nocParams) }) File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. 
* @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ 
* │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. 
*/ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. 
We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... // Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. 
* * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. 
|$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. */ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. 
*/ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. */ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. 
*/ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File HierarchicalElement.scala: package freechips.rocketchip.subsystem import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.devices.debug.TLDebugModule import freechips.rocketchip.diplomacy.{BufferParams} import freechips.rocketchip.interrupts.IntXbar import freechips.rocketchip.prci.{ClockSinkParameters, ResetCrossingType, ClockCrossingType} import freechips.rocketchip.tile.{LookupByHartIdImpl, TraceBundle} import freechips.rocketchip.tilelink.{TLNode, TLIdentityNode, TLXbar, TLBuffer, TLInwardNode, TLOutwardNode} trait HierarchicalElementParams { val baseName: String // duplicated instances shouuld share a base name val uniqueName: String val clockSinkParams: ClockSinkParameters } abstract class InstantiableHierarchicalElementParams[ElementType <: BaseHierarchicalElement] extends HierarchicalElementParams /** An interface for describing the parameteization of how HierarchicalElements are connected to interconnects */ trait HierarchicalElementCrossingParamsLike { /** The type of clock crossing that should be inserted at the element boundary. */ def crossingType: ClockCrossingType /** Parameters describing the contents and behavior of the point where the element is attached as an interconnect master. */ def master: HierarchicalElementPortParamsLike /** Parameters describing the contents and behavior of the point where the element is attached as an interconnect slave. */ def slave: HierarchicalElementPortParamsLike /** The subnetwork location of the device selecting the apparent base address of MMIO devices inside the element */ def mmioBaseAddressPrefixWhere: TLBusWrapperLocation /** Inject a reset management subgraph that effects the element child reset only */ def resetCrossingType: ResetCrossingType /** Keep the element clock separate from the interconnect clock (e.g. even if they are synchronous to one another) */ def forceSeparateClockReset: Boolean } /** An interface for describing the parameterization of how a particular element port is connected to an interconnect */ trait HierarchicalElementPortParamsLike { /** The subnetwork location of the interconnect to which this element port should be connected. */ def where: TLBusWrapperLocation /** Allows port-specific adapters to be injected into the interconnect side of the attachment point. */ def injectNode(context: Attachable)(implicit p: Parameters): TLNode } abstract class BaseHierarchicalElement (val crossing: ClockCrossingType)(implicit p: Parameters) extends LazyModule()(p) with CrossesToOnlyOneClockDomain { def module: BaseHierarchicalElementModuleImp[BaseHierarchicalElement] protected val tlOtherMastersNode = TLIdentityNode() protected val tlMasterXbar = LazyModule(new TLXbar(nameSuffix = Some(s"MasterXbar_$desiredName"))) protected val tlSlaveXbar = LazyModule(new TLXbar(nameSuffix = Some(s"SlaveXbar_$desiredName"))) protected val intXbar = LazyModule(new IntXbar) def masterNode: TLOutwardNode def slaveNode: TLInwardNode /** Helper function to insert additional buffers on master ports at the boundary of the tile. 
* * The boundary buffering needed to cut feed-through paths is * microarchitecture specific, so this may need to be overridden * in subclasses of this class. */ def makeMasterBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) = TLBuffer(BufferParams.none) /** Helper function to insert additional buffers on slave ports at the boundary of the tile. * * The boundary buffering needed to cut feed-through paths is * microarchitecture specific, so this may need to be overridden * in subclasses of this class. */ def makeSlaveBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) = TLBuffer(BufferParams.none) } abstract class BaseHierarchicalElementModuleImp[+L <: BaseHierarchicalElement](val outer: L) extends LazyModuleImp(outer) File RocketTile.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. package freechips.rocketchip.tile import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.devices.tilelink.{BasicBusBlockerParams, BasicBusBlocker} import freechips.rocketchip.diplomacy.{ AddressSet, DisableMonitors, BufferParams } import freechips.rocketchip.resources.{ SimpleDevice, Description, ResourceAnchors, ResourceBindings, ResourceBinding, Resource, ResourceAddress, } import freechips.rocketchip.interrupts.IntIdentityNode import freechips.rocketchip.tilelink.{TLIdentityNode, TLBuffer} import freechips.rocketchip.rocket.{ RocketCoreParams, ICacheParams, DCacheParams, BTBParams, HasHellaCache, HasICacheFrontend, ScratchpadSlavePort, HasICacheFrontendModule, Rocket } import freechips.rocketchip.subsystem.HierarchicalElementCrossingParamsLike import freechips.rocketchip.prci.{ClockSinkParameters, RationalCrossing, ClockCrossingType} import freechips.rocketchip.util.{Annotated, InOrderArbiter} import freechips.rocketchip.util.BooleanToAugmentedBoolean case class RocketTileBoundaryBufferParams(force: Boolean = false) case class RocketTileParams( core: RocketCoreParams = RocketCoreParams(), icache: Option[ICacheParams] = Some(ICacheParams()), dcache: Option[DCacheParams] = Some(DCacheParams()), btb: Option[BTBParams] = Some(BTBParams()), dataScratchpadBytes: Int = 0, tileId: Int = 0, beuAddr: Option[BigInt] = None, blockerCtrlAddr: Option[BigInt] = None, clockSinkParams: ClockSinkParameters = ClockSinkParameters(), boundaryBuffers: Option[RocketTileBoundaryBufferParams] = None ) extends InstantiableTileParams[RocketTile] { require(icache.isDefined) require(dcache.isDefined) val baseName = "rockettile" val uniqueName = s"${baseName}_$tileId" def instantiate(crossing: HierarchicalElementCrossingParamsLike, lookup: LookupByHartIdImpl)(implicit p: Parameters): RocketTile = { new RocketTile(this, crossing, lookup) } } class RocketTile private( val rocketParams: RocketTileParams, crossing: ClockCrossingType, lookup: LookupByHartIdImpl, q: Parameters) extends BaseTile(rocketParams, crossing, lookup, q) with SinksExternalInterrupts with SourcesExternalNotifications with HasLazyRoCC // implies CanHaveSharedFPU with CanHavePTW with HasHellaCache with HasHellaCache with HasICacheFrontend { // Private constructor ensures altered LazyModule.p is used implicitly def this(params: RocketTileParams, crossing: HierarchicalElementCrossingParamsLike, lookup: LookupByHartIdImpl)(implicit p: Parameters) = this(params, crossing.crossingType, lookup, p) val intOutwardNode = rocketParams.beuAddr map { _ => IntIdentityNode() } val slaveNode = TLIdentityNode() val masterNode = 
visibilityNode val dtim_adapter = tileParams.dcache.flatMap { d => d.scratch.map { s => LazyModule(new ScratchpadSlavePort(AddressSet.misaligned(s, d.dataScratchpadBytes), lazyCoreParamsView.coreDataBytes, tileParams.core.useAtomics && !tileParams.core.useAtomicsOnlyForIO)) }} dtim_adapter.foreach(lm => connectTLSlave(lm.node, lm.node.portParams.head.beatBytes)) val bus_error_unit = rocketParams.beuAddr map { a => val beu = LazyModule(new BusErrorUnit(new L1BusErrors, BusErrorUnitParams(a), xLen/8)) intOutwardNode.get := beu.intNode connectTLSlave(beu.node, xBytes) beu } val tile_master_blocker = tileParams.blockerCtrlAddr .map(BasicBusBlockerParams(_, xBytes, masterPortBeatBytes, deadlock = true)) .map(bp => LazyModule(new BasicBusBlocker(bp))) tile_master_blocker.foreach(lm => connectTLSlave(lm.controlNode, xBytes)) // TODO: this doesn't block other masters, e.g. RoCCs tlOtherMastersNode := tile_master_blocker.map { _.node := tlMasterXbar.node } getOrElse { tlMasterXbar.node } masterNode :=* tlOtherMastersNode DisableMonitors { implicit p => tlSlaveXbar.node :*= slaveNode } nDCachePorts += 1 /*core */ + (dtim_adapter.isDefined).toInt + rocketParams.core.vector.map(_.useDCache.toInt).getOrElse(0) val dtimProperty = dtim_adapter.map(d => Map( "sifive,dtim" -> d.device.asProperty)).getOrElse(Nil) val itimProperty = frontend.icache.itimProperty.toSeq.flatMap(p => Map("sifive,itim" -> p)) val beuProperty = bus_error_unit.map(d => Map( "sifive,buserror" -> d.device.asProperty)).getOrElse(Nil) val cpuDevice: SimpleDevice = new SimpleDevice("cpu", Seq("sifive,rocket0", "riscv")) { override def parent = Some(ResourceAnchors.cpus) override def describe(resources: ResourceBindings): Description = { val Description(name, mapping) = super.describe(resources) Description(name, mapping ++ cpuProperties ++ nextLevelCacheProperty ++ tileProperties ++ dtimProperty ++ itimProperty ++ beuProperty) } } val vector_unit = rocketParams.core.vector.map(v => LazyModule(v.build(p))) vector_unit.foreach(vu => tlMasterXbar.node :=* vu.atlNode) vector_unit.foreach(vu => tlOtherMastersNode :=* vu.tlNode) ResourceBinding { Resource(cpuDevice, "reg").bind(ResourceAddress(tileId)) } override lazy val module = new RocketTileModuleImp(this) override def makeMasterBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) = (rocketParams.boundaryBuffers, crossing) match { case (Some(RocketTileBoundaryBufferParams(true )), _) => TLBuffer() case (Some(RocketTileBoundaryBufferParams(false)), _: RationalCrossing) => TLBuffer(BufferParams.none, BufferParams.flow, BufferParams.none, BufferParams.flow, BufferParams(1)) case _ => TLBuffer(BufferParams.none) } override def makeSlaveBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) = (rocketParams.boundaryBuffers, crossing) match { case (Some(RocketTileBoundaryBufferParams(true )), _) => TLBuffer() case (Some(RocketTileBoundaryBufferParams(false)), _: RationalCrossing) => TLBuffer(BufferParams.flow, BufferParams.none, BufferParams.none, BufferParams.none, BufferParams.none) case _ => TLBuffer(BufferParams.none) } } class RocketTileModuleImp(outer: RocketTile) extends BaseTileModuleImp(outer) with HasFpuOpt with HasLazyRoCCModule with HasICacheFrontendModule { Annotated.params(this, outer.rocketParams) val core = Module(new Rocket(outer)(outer.p)) outer.vector_unit.foreach { v => core.io.vector.get <> v.module.io.core v.module.io.tlb <> outer.dcache.module.io.tlb_port } // reset vector is connected in the Frontend to s2_pc core.io.reset_vector := DontCare 
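// Usage sketch for the makeMaster/SlaveBoundaryBuffers overrides earlier in this file
// (illustrative only): a design can force full TLBuffers at the tile edge through the tile
// parameters, e.g.
//   RocketTileParams(boundaryBuffers = Some(RocketTileBoundaryBufferParams(force = true)))
// With force = false, only RationalCrossing tiles get the flow-buffer pattern; any other
// crossing, or boundaryBuffers = None, falls back to TLBuffer(BufferParams.none).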
// Report unrecoverable error conditions; for now the only cause is cache ECC errors outer.reportHalt(List(outer.dcache.module.io.errors)) // Report when the tile has ceased to retire instructions; for now the only cause is clock gating outer.reportCease(outer.rocketParams.core.clockGate.option( !outer.dcache.module.io.cpu.clock_enabled && !outer.frontend.module.io.cpu.clock_enabled && !ptw.io.dpath.clock_enabled && core.io.cease)) outer.reportWFI(Some(core.io.wfi)) outer.decodeCoreInterrupts(core.io.interrupts) // Decode the interrupt vector outer.bus_error_unit.foreach { beu => core.io.interrupts.buserror.get := beu.module.io.interrupt beu.module.io.errors.dcache := outer.dcache.module.io.errors beu.module.io.errors.icache := outer.frontend.module.io.errors } core.io.interrupts.nmi.foreach { nmi => nmi := outer.nmiSinkNode.get.bundle } // Pass through various external constants and reports that were bundle-bridged into the tile outer.traceSourceNode.bundle <> core.io.trace core.io.traceStall := outer.traceAuxSinkNode.bundle.stall outer.bpwatchSourceNode.bundle <> core.io.bpwatch core.io.hartid := outer.hartIdSinkNode.bundle require(core.io.hartid.getWidth >= outer.hartIdSinkNode.bundle.getWidth, s"core hartid wire (${core.io.hartid.getWidth}b) truncates external hartid wire (${outer.hartIdSinkNode.bundle.getWidth}b)") // Connect the core pipeline to other intra-tile modules outer.frontend.module.io.cpu <> core.io.imem dcachePorts += core.io.dmem // TODO outer.dcachePorts += () => module.core.io.dmem ?? fpuOpt foreach { fpu => core.io.fpu :<>= fpu.io.waiveAs[FPUCoreIO](_.cp_req, _.cp_resp) } if (fpuOpt.isEmpty) { core.io.fpu := DontCare } outer.vector_unit foreach { v => if (outer.rocketParams.core.vector.get.useDCache) { dcachePorts += v.module.io.dmem } else { v.module.io.dmem := DontCare } } core.io.ptw <> ptw.io.dpath // Connect the coprocessor interfaces if (outer.roccs.size > 0) { cmdRouter.get.io.in <> core.io.rocc.cmd outer.roccs.foreach{ lm => lm.module.io.exception := core.io.rocc.exception lm.module.io.fpu_req.ready := DontCare lm.module.io.fpu_resp.valid := DontCare lm.module.io.fpu_resp.bits.data := DontCare lm.module.io.fpu_resp.bits.exc := DontCare } core.io.rocc.resp <> respArb.get.io.out core.io.rocc.busy <> (cmdRouter.get.io.busy || outer.roccs.map(_.module.io.busy).reduce(_ || _)) core.io.rocc.interrupt := outer.roccs.map(_.module.io.interrupt).reduce(_ || _) (core.io.rocc.csrs zip roccCSRIOs.flatten).foreach { t => t._2 <> t._1 } } else { // tie off core.io.rocc.cmd.ready := false.B core.io.rocc.resp.valid := false.B core.io.rocc.resp.bits := DontCare core.io.rocc.busy := DontCare core.io.rocc.interrupt := DontCare } // Dont care mem since not all RoCC need accessing memory core.io.rocc.mem := DontCare // Rocket has higher priority to DTIM than other TileLink clients outer.dtim_adapter.foreach { lm => dcachePorts += lm.module.io.dmem } // TODO eliminate this redundancy val h = dcachePorts.size val c = core.dcacheArbPorts val o = outer.nDCachePorts require(h == c, s"port list size was $h, core expected $c") require(h == o, s"port list size was $h, outer counted $o") // TODO figure out how to move the below into their respective mix-ins dcacheArb.io.requestor <> dcachePorts.toSeq ptw.io.requestor <> ptwPorts.toSeq } trait HasFpuOpt { this: RocketTileModuleImp => val fpuOpt = outer.tileParams.core.fpu.map(params => Module(new FPU(params)(outer.p))) fpuOpt.foreach { fpu => val nRoCCFPUPorts = outer.roccs.count(_.usesFPU) val nFPUPorts = nRoCCFPUPorts + 
      outer.rocketParams.core.useVector.toInt
    if (nFPUPorts > 0) {
      val fpArb = Module(new InOrderArbiter(new FPInput()(outer.p), new FPResult()(outer.p), nFPUPorts))
      fpu.io.cp_req <> fpArb.io.out_req
      fpArb.io.out_resp <> fpu.io.cp_resp

      val fp_rocc_ios = outer.roccs.filter(_.usesFPU).map(_.module.io)
      for (i <- 0 until nRoCCFPUPorts) {
        fpArb.io.in_req(i) <> fp_rocc_ios(i).fpu_req
        fp_rocc_ios(i).fpu_resp <> fpArb.io.in_resp(i)
      }
      outer.vector_unit.foreach(vu => {
        fpArb.io.in_req(nRoCCFPUPorts) <> vu.module.io.fp_req
        vu.module.io.fp_resp <> fpArb.io.in_resp(nRoCCFPUPorts)
      })
    } else {
      fpu.io.cp_req.valid := false.B
      fpu.io.cp_req.bits := DontCare
      fpu.io.cp_resp.ready := false.B
    }
  }
}

File BundleBridgeNexus.scala:
package org.chipsalliance.diplomacy.bundlebridge

import chisel3.{chiselTypeOf, ActualDirection, Data, Reg}
import chisel3.reflect.DataMirror

import org.chipsalliance.cde.config.Parameters

import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyRawModuleImp}

class BundleBridgeNexus[T <: Data](
  inputFn:             Seq[T] => T,
  outputFn:            (T, Int) => Seq[T],
  default:             Option[() => T] = None,
  inputRequiresOutput: Boolean = false,
  override val shouldBeInlined: Boolean = true
)(
  implicit p: Parameters)
    extends LazyModule {
  val node = BundleBridgeNexusNode[T](default, inputRequiresOutput)

  lazy val module = new Impl
  class Impl extends LazyRawModuleImp(this) {
    val defaultWireOpt = default.map(_())
    val inputs: Seq[T] = node.in.map(_._1)
    inputs.foreach { i =>
      require(
        DataMirror.checkTypeEquivalence(i, inputs.head),
        s"${node.context} requires all inputs have equivalent Chisel Data types, but got\n$i\nvs\n${inputs.head}"
      )
    }
    inputs.flatMap(getElements).foreach { elt =>
      DataMirror.directionOf(elt) match {
        case ActualDirection.Output => ()
        case ActualDirection.Unspecified => ()
        case _ => require(false, s"${node.context} can only be used with Output-directed Bundles")
      }
    }

    val outputs: Seq[T] = if (node.out.size > 0) {
      val broadcast: T = if (inputs.size >= 1) inputFn(inputs) else defaultWireOpt.get
      outputFn(broadcast, node.out.size)
    } else { Nil }

    val typeName = outputs.headOption.map(_.typeName).getOrElse("NoOutput")
    override def desiredName = s"BundleBridgeNexus_$typeName"

    node.out.map(_._1).foreach { o =>
      require(
        DataMirror.checkTypeEquivalence(o, outputs.head),
        s"${node.context} requires all outputs have equivalent Chisel Data types, but got\n$o\nvs\n${outputs.head}"
      )
    }

    require(
      outputs.size == node.out.size,
      s"${node.context} outputFn must generate one output wire per edgeOut, but got ${outputs.size} vs ${node.out.size}"
    )
    node.out.zip(outputs).foreach { case ((out, _), bcast) => out := bcast }
  }
}

object BundleBridgeNexus {
  def safeRegNext[T <: Data](x: T): T = {
    val reg = Reg(chiselTypeOf(x))
    reg := x
    reg
  }

  def requireOne[T <: Data](registered: Boolean)(seq: Seq[T]): T = {
    require(seq.size == 1, "BundleBroadcast default requires one input")
    if (registered) safeRegNext(seq.head) else seq.head
  }

  def orReduction[T <: Data](registered: Boolean)(seq: Seq[T]): T = {
    val x = seq.reduce((a, b) => (a.asUInt | b.asUInt).asTypeOf(seq.head))
    if (registered) safeRegNext(x) else x
  }

  def fillN[T <: Data](registered: Boolean)(x: T, n: Int): Seq[T] = Seq.fill(n) {
    if (registered) safeRegNext(x) else x
  }

  def apply[T <: Data](
    inputFn:             Seq[T] => T = orReduction[T](false) _,
    outputFn:            (T, Int) => Seq[T] = fillN[T](false) _,
    default:             Option[() => T] = None,
    inputRequiresOutput: Boolean = false,
    shouldBeInlined:     Boolean = true
  )(
    implicit p: Parameters
  ): BundleBridgeNexusNode[T] = {
    val nexus = LazyModule(new BundleBridgeNexus[T](inputFn, outputFn, default, inputRequiresOutput, shouldBeInlined))
    nexus.node
  }
}
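
Before the emitted Verilog, a brief illustrative sketch (an editorial addition, not one of the Chisel files above and not reflected in the generated module below): it shows the common fan-out pattern enabled by the BundleBridgeNexus factory, assuming the companion BundleBridgeSource and BundleBridgeSink node types from the same bundlebridge package. The HartIdFanout name and the 8-bit payload are made up for the example.

// Minimal usage sketch, assuming BundleBridgeSource/BundleBridgeSink live in the
// same package as BundleBridgeNexus above.
import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import org.chipsalliance.diplomacy.bundlebridge.{BundleBridgeNexus, BundleBridgeSink, BundleBridgeSource}

class HartIdFanout(implicit p: Parameters) extends LazyModule {
  // One producer of an 8-bit value...
  val idSource = BundleBridgeSource(() => UInt(8.W))
  // ...broadcast through a nexus (orReduction over inputs, fillN to outputs)...
  val idNexus = BundleBridgeNexus[UInt]()
  // ...and observed by two independent consumers.
  val idSinks = Seq.fill(2)(BundleBridgeSink[UInt]())

  idNexus := idSource
  idSinks.foreach(_ := idNexus)

  lazy val module = new LazyModuleImp(this) {
    idSource.bundle := 3.U // every sink's bundle now observes the same value
  }
}

The default inputFn/outputFn arguments mean a single driver is simply replicated to every output edge; passing safeRegNext-based variants (registered = true) inserts a register stage in the broadcast path.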
module RocketTile( // @[RocketTile.scala:141:7] input clock, // @[RocketTile.scala:141:7] input reset, // @[RocketTile.scala:141:7] input auto_rerocc_buffer_out_req_ready, // @[LazyModuleImp.scala:107:25] output auto_rerocc_buffer_out_req_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_rerocc_buffer_out_req_bits_opcode, // @[LazyModuleImp.scala:107:25] output [3:0] auto_rerocc_buffer_out_req_bits_client_id, // @[LazyModuleImp.scala:107:25] output [2:0] auto_rerocc_buffer_out_req_bits_manager_id, // @[LazyModuleImp.scala:107:25] output [63:0] auto_rerocc_buffer_out_req_bits_data, // @[LazyModuleImp.scala:107:25] output auto_rerocc_buffer_out_resp_ready, // @[LazyModuleImp.scala:107:25] input auto_rerocc_buffer_out_resp_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_rerocc_buffer_out_resp_bits_opcode, // @[LazyModuleImp.scala:107:25] input [3:0] auto_rerocc_buffer_out_resp_bits_client_id, // @[LazyModuleImp.scala:107:25] input [2:0] auto_rerocc_buffer_out_resp_bits_manager_id, // @[LazyModuleImp.scala:107:25] input [63:0] auto_rerocc_buffer_out_resp_bits_data, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_buffer_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_buffer_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_buffer_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_buffer_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_buffer_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_b_ready, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_b_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_buffer_out_b_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_buffer_out_b_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_buffer_out_b_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_buffer_out_b_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_buffer_out_b_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_buffer_out_b_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_buffer_out_b_bits_data, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_b_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_c_ready, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_c_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_c_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_buffer_out_c_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_buffer_out_c_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_buffer_out_c_bits_address, // @[LazyModuleImp.scala:107:25] output [63:0] auto_buffer_out_c_bits_data, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_buffer_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_buffer_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_buffer_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_buffer_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [2:0] 
auto_buffer_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_buffer_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_e_ready, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_e_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_e_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_wfi_out_0, // @[LazyModuleImp.scala:107:25] input auto_int_local_in_3_0, // @[LazyModuleImp.scala:107:25] input auto_int_local_in_2_0, // @[LazyModuleImp.scala:107:25] input auto_int_local_in_1_0, // @[LazyModuleImp.scala:107:25] input auto_int_local_in_1_1, // @[LazyModuleImp.scala:107:25] input auto_int_local_in_0_0, // @[LazyModuleImp.scala:107:25] output auto_trace_source_out_insns_0_valid, // @[LazyModuleImp.scala:107:25] output [39:0] auto_trace_source_out_insns_0_iaddr, // @[LazyModuleImp.scala:107:25] output [31:0] auto_trace_source_out_insns_0_insn, // @[LazyModuleImp.scala:107:25] output [2:0] auto_trace_source_out_insns_0_priv, // @[LazyModuleImp.scala:107:25] output auto_trace_source_out_insns_0_exception, // @[LazyModuleImp.scala:107:25] output auto_trace_source_out_insns_0_interrupt, // @[LazyModuleImp.scala:107:25] output [63:0] auto_trace_source_out_insns_0_cause, // @[LazyModuleImp.scala:107:25] output [39:0] auto_trace_source_out_insns_0_tval, // @[LazyModuleImp.scala:107:25] output [63:0] auto_trace_source_out_time, // @[LazyModuleImp.scala:107:25] input auto_hartid_in // @[LazyModuleImp.scala:107:25] ); wire buffer_auto_in_e_valid; // @[Buffer.scala:40:9] wire buffer_auto_in_e_ready; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_e_bits_sink; // @[Buffer.scala:40:9] wire buffer_auto_in_d_valid; // @[Buffer.scala:40:9] wire buffer_auto_in_d_ready; // @[Buffer.scala:40:9] wire buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_in_d_bits_data; // @[Buffer.scala:40:9] wire buffer_auto_in_d_bits_denied; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_d_bits_sink; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_d_bits_source; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_in_d_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_d_bits_param; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_d_bits_opcode; // @[Buffer.scala:40:9] wire buffer_auto_in_c_valid; // @[Buffer.scala:40:9] wire buffer_auto_in_c_ready; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_in_c_bits_data; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_in_c_bits_address; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_c_bits_source; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_in_c_bits_size; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_c_bits_param; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_c_bits_opcode; // @[Buffer.scala:40:9] wire buffer_auto_in_b_valid; // @[Buffer.scala:40:9] wire buffer_auto_in_b_ready; // @[Buffer.scala:40:9] wire buffer_auto_in_b_bits_corrupt; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_in_b_bits_data; // @[Buffer.scala:40:9] wire [7:0] buffer_auto_in_b_bits_mask; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_in_b_bits_address; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_b_bits_source; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_in_b_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_b_bits_param; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_b_bits_opcode; // @[Buffer.scala:40:9] wire buffer_auto_in_a_valid; 
// @[Buffer.scala:40:9] wire buffer_auto_in_a_ready; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_in_a_bits_data; // @[Buffer.scala:40:9] wire [7:0] buffer_auto_in_a_bits_mask; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_in_a_bits_address; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_a_bits_source; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_in_a_bits_size; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_a_bits_param; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_a_bits_opcode; // @[Buffer.scala:40:9] wire widget_1_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9] wire [1:0] widget_1_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9] wire [31:0] widget_1_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_e_ready; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9] wire [1:0] widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_c_ready; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_b_valid; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_b_bits_corrupt; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_out_b_bits_data; // @[WidthWidget.scala:27:9] wire [7:0] widget_auto_anon_out_b_bits_mask; // @[WidthWidget.scala:27:9] wire [31:0] widget_auto_anon_out_b_bits_address; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_b_bits_source; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_out_b_bits_size; // @[WidthWidget.scala:27:9] wire [1:0] widget_auto_anon_out_b_bits_param; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_b_bits_opcode; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_e_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_e_bits_sink; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_c_valid; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_in_c_bits_data; // @[WidthWidget.scala:27:9] wire [31:0] widget_auto_anon_in_c_bits_address; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_c_bits_source; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_in_c_bits_size; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_c_bits_param; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_c_bits_opcode; // @[WidthWidget.scala:27:9] wire 
widget_auto_anon_in_b_ready; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9] wire [7:0] widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9] wire [31:0] widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9] wire [2:0] broadcast_2_auto_in_0_action; // @[BundleBridgeNexus.scala:20:9] wire broadcast_2_auto_in_0_valid_0; // @[BundleBridgeNexus.scala:20:9] wire broadcast_auto_in; // @[BundleBridgeNexus.scala:20:9] wire _core_io_imem_might_request; // @[RocketTile.scala:147:20] wire _core_io_imem_req_valid; // @[RocketTile.scala:147:20] wire [39:0] _core_io_imem_req_bits_pc; // @[RocketTile.scala:147:20] wire _core_io_imem_req_bits_speculative; // @[RocketTile.scala:147:20] wire _core_io_imem_sfence_valid; // @[RocketTile.scala:147:20] wire _core_io_imem_sfence_bits_rs1; // @[RocketTile.scala:147:20] wire _core_io_imem_sfence_bits_rs2; // @[RocketTile.scala:147:20] wire [38:0] _core_io_imem_sfence_bits_addr; // @[RocketTile.scala:147:20] wire _core_io_imem_sfence_bits_asid; // @[RocketTile.scala:147:20] wire _core_io_imem_sfence_bits_hv; // @[RocketTile.scala:147:20] wire _core_io_imem_sfence_bits_hg; // @[RocketTile.scala:147:20] wire _core_io_imem_resp_ready; // @[RocketTile.scala:147:20] wire _core_io_imem_btb_update_valid; // @[RocketTile.scala:147:20] wire [1:0] _core_io_imem_btb_update_bits_prediction_cfiType; // @[RocketTile.scala:147:20] wire _core_io_imem_btb_update_bits_prediction_taken; // @[RocketTile.scala:147:20] wire [1:0] _core_io_imem_btb_update_bits_prediction_mask; // @[RocketTile.scala:147:20] wire _core_io_imem_btb_update_bits_prediction_bridx; // @[RocketTile.scala:147:20] wire [38:0] _core_io_imem_btb_update_bits_prediction_target; // @[RocketTile.scala:147:20] wire [4:0] _core_io_imem_btb_update_bits_prediction_entry; // @[RocketTile.scala:147:20] wire [7:0] _core_io_imem_btb_update_bits_prediction_bht_history; // @[RocketTile.scala:147:20] wire _core_io_imem_btb_update_bits_prediction_bht_value; // @[RocketTile.scala:147:20] wire [38:0] _core_io_imem_btb_update_bits_pc; // @[RocketTile.scala:147:20] wire [38:0] _core_io_imem_btb_update_bits_target; // @[RocketTile.scala:147:20] wire _core_io_imem_btb_update_bits_isValid; // @[RocketTile.scala:147:20] wire [38:0] _core_io_imem_btb_update_bits_br_pc; // @[RocketTile.scala:147:20] wire [1:0] _core_io_imem_btb_update_bits_cfiType; // @[RocketTile.scala:147:20] wire _core_io_imem_bht_update_valid; // @[RocketTile.scala:147:20] wire [7:0] _core_io_imem_bht_update_bits_prediction_history; // @[RocketTile.scala:147:20] wire _core_io_imem_bht_update_bits_prediction_value; // @[RocketTile.scala:147:20] wire [38:0] _core_io_imem_bht_update_bits_pc; // @[RocketTile.scala:147:20] wire _core_io_imem_bht_update_bits_branch; // @[RocketTile.scala:147:20] wire _core_io_imem_bht_update_bits_taken; // @[RocketTile.scala:147:20] wire _core_io_imem_bht_update_bits_mispredict; // @[RocketTile.scala:147:20] wire _core_io_imem_flush_icache; // @[RocketTile.scala:147:20] wire _core_io_imem_progress; // @[RocketTile.scala:147:20] wire _core_io_dmem_req_valid; // @[RocketTile.scala:147:20] wire [39:0] 
_core_io_dmem_req_bits_addr; // @[RocketTile.scala:147:20] wire [7:0] _core_io_dmem_req_bits_tag; // @[RocketTile.scala:147:20] wire [4:0] _core_io_dmem_req_bits_cmd; // @[RocketTile.scala:147:20] wire [1:0] _core_io_dmem_req_bits_size; // @[RocketTile.scala:147:20] wire _core_io_dmem_req_bits_signed; // @[RocketTile.scala:147:20] wire [1:0] _core_io_dmem_req_bits_dprv; // @[RocketTile.scala:147:20] wire _core_io_dmem_req_bits_dv; // @[RocketTile.scala:147:20] wire _core_io_dmem_req_bits_no_resp; // @[RocketTile.scala:147:20] wire _core_io_dmem_s1_kill; // @[RocketTile.scala:147:20] wire [63:0] _core_io_dmem_s1_data_data; // @[RocketTile.scala:147:20] wire _core_io_dmem_keep_clock_enabled; // @[RocketTile.scala:147:20] wire [3:0] _core_io_ptw_ptbr_mode; // @[RocketTile.scala:147:20] wire [43:0] _core_io_ptw_ptbr_ppn; // @[RocketTile.scala:147:20] wire _core_io_ptw_sfence_valid; // @[RocketTile.scala:147:20] wire _core_io_ptw_sfence_bits_rs1; // @[RocketTile.scala:147:20] wire _core_io_ptw_sfence_bits_rs2; // @[RocketTile.scala:147:20] wire [38:0] _core_io_ptw_sfence_bits_addr; // @[RocketTile.scala:147:20] wire _core_io_ptw_sfence_bits_asid; // @[RocketTile.scala:147:20] wire _core_io_ptw_sfence_bits_hv; // @[RocketTile.scala:147:20] wire _core_io_ptw_sfence_bits_hg; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_debug; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_cease; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_wfi; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_status_isa; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_status_dprv; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_dv; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_status_prv; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_v; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_mpv; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_gva; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_tsr; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_tw; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_tvm; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_mxr; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_sum; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_mprv; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_status_fs; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_status_mpp; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_spp; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_mpie; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_spie; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_mie; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_sie; // @[RocketTile.scala:147:20] wire _core_io_ptw_hstatus_spvp; // @[RocketTile.scala:147:20] wire _core_io_ptw_hstatus_spv; // @[RocketTile.scala:147:20] wire _core_io_ptw_hstatus_gva; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_debug; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_cease; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_wfi; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_gstatus_isa; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_gstatus_dprv; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_dv; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_gstatus_prv; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_v; // @[RocketTile.scala:147:20] wire [22:0] _core_io_ptw_gstatus_zero2; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_mpv; // 
@[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_gva; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_mbe; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_sbe; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_gstatus_sxl; // @[RocketTile.scala:147:20] wire [7:0] _core_io_ptw_gstatus_zero1; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_tsr; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_tw; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_tvm; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_mxr; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_sum; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_mprv; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_gstatus_fs; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_gstatus_mpp; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_gstatus_vs; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_spp; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_mpie; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_ube; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_spie; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_upie; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_mie; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_hie; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_sie; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_uie; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_0_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_0_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_0_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_0_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_0_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_0_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_0_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_1_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_1_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_1_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_1_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_1_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_1_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_1_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_2_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_2_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_2_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_2_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_2_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_2_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_2_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_3_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_3_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_3_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_3_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_3_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_3_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_3_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_4_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_4_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_4_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_4_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_4_cfg_r; // @[RocketTile.scala:147:20] 
wire [29:0] _core_io_ptw_pmp_4_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_4_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_5_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_5_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_5_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_5_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_5_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_5_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_5_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_6_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_6_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_6_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_6_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_6_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_6_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_6_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_7_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_7_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_7_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_7_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_7_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_7_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_7_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_0_ren; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_0_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_0_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_0_value; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_1_ren; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_1_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_1_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_1_value; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_2_ren; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_2_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_2_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_2_value; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_3_ren; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_3_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_3_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_3_value; // @[RocketTile.scala:147:20] wire _core_io_fpu_hartid; // @[RocketTile.scala:147:20] wire [63:0] _core_io_fpu_time; // @[RocketTile.scala:147:20] wire [31:0] _core_io_fpu_inst; // @[RocketTile.scala:147:20] wire [63:0] _core_io_fpu_fromint_data; // @[RocketTile.scala:147:20] wire [2:0] _core_io_fpu_fcsr_rm; // @[RocketTile.scala:147:20] wire _core_io_fpu_ll_resp_val; // @[RocketTile.scala:147:20] wire [2:0] _core_io_fpu_ll_resp_type; // @[RocketTile.scala:147:20] wire [4:0] _core_io_fpu_ll_resp_tag; // @[RocketTile.scala:147:20] wire [63:0] _core_io_fpu_ll_resp_data; // @[RocketTile.scala:147:20] wire _core_io_fpu_valid; // @[RocketTile.scala:147:20] wire _core_io_fpu_killx; // @[RocketTile.scala:147:20] wire _core_io_fpu_killm; // @[RocketTile.scala:147:20] wire _core_io_fpu_keep_clock_enabled; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_valid; // @[RocketTile.scala:147:20] wire 
[6:0] _core_io_rocc_cmd_bits_inst_funct; // @[RocketTile.scala:147:20] wire [4:0] _core_io_rocc_cmd_bits_inst_rs2; // @[RocketTile.scala:147:20] wire [4:0] _core_io_rocc_cmd_bits_inst_rs1; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_inst_xd; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_inst_xs1; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_inst_xs2; // @[RocketTile.scala:147:20] wire [4:0] _core_io_rocc_cmd_bits_inst_rd; // @[RocketTile.scala:147:20] wire [6:0] _core_io_rocc_cmd_bits_inst_opcode; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_cmd_bits_rs1; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_cmd_bits_rs2; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_debug; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_cease; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_wfi; // @[RocketTile.scala:147:20] wire [31:0] _core_io_rocc_cmd_bits_status_isa; // @[RocketTile.scala:147:20] wire [1:0] _core_io_rocc_cmd_bits_status_dprv; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_dv; // @[RocketTile.scala:147:20] wire [1:0] _core_io_rocc_cmd_bits_status_prv; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_v; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_mpv; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_gva; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_tsr; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_tw; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_tvm; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_mxr; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_sum; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_mprv; // @[RocketTile.scala:147:20] wire [1:0] _core_io_rocc_cmd_bits_status_fs; // @[RocketTile.scala:147:20] wire [1:0] _core_io_rocc_cmd_bits_status_mpp; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_spp; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_mpie; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_spie; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_mie; // @[RocketTile.scala:147:20] wire _core_io_rocc_cmd_bits_status_sie; // @[RocketTile.scala:147:20] wire _core_io_rocc_resp_ready; // @[RocketTile.scala:147:20] wire _core_io_rocc_exception; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_0_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_0_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_0_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_0_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_1_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_1_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_1_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_1_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_2_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_2_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_2_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_2_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_3_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_3_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_3_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_3_value; // @[RocketTile.scala:147:20] wire 
_core_io_rocc_csrs_4_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_4_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_4_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_4_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_5_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_5_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_5_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_5_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_6_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_6_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_6_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_6_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_7_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_7_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_7_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_7_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_8_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_8_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_8_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_8_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_9_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_9_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_9_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_9_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_10_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_10_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_10_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_10_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_11_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_11_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_11_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_11_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_12_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_12_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_12_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_12_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_13_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_13_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_13_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_13_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_14_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_14_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_14_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_14_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_15_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_15_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_15_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_15_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_16_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_16_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_16_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_16_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_17_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_17_wen; // @[RocketTile.scala:147:20] 
wire [63:0] _core_io_rocc_csrs_17_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_17_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_18_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_18_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_18_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_18_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_19_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_19_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_19_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_19_value; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_20_ren; // @[RocketTile.scala:147:20] wire _core_io_rocc_csrs_20_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_20_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_rocc_csrs_20_value; // @[RocketTile.scala:147:20] wire _core_io_wfi; // @[RocketTile.scala:147:20] wire _respArb_io_in_0_q_io_enq_ready; // @[Decoupled.scala:362:21] wire _respArb_io_in_0_q_io_deq_valid; // @[Decoupled.scala:362:21] wire [4:0] _respArb_io_in_0_q_io_deq_bits_rd; // @[Decoupled.scala:362:21] wire [63:0] _respArb_io_in_0_q_io_deq_bits_data; // @[Decoupled.scala:362:21] wire _dcIF_io_requestor_req_ready; // @[LazyRoCC.scala:106:24] wire _dcIF_io_requestor_resp_valid; // @[LazyRoCC.scala:106:24] wire [39:0] _dcIF_io_requestor_resp_bits_addr; // @[LazyRoCC.scala:106:24] wire [7:0] _dcIF_io_requestor_resp_bits_tag; // @[LazyRoCC.scala:106:24] wire [4:0] _dcIF_io_requestor_resp_bits_cmd; // @[LazyRoCC.scala:106:24] wire [1:0] _dcIF_io_requestor_resp_bits_size; // @[LazyRoCC.scala:106:24] wire _dcIF_io_requestor_resp_bits_signed; // @[LazyRoCC.scala:106:24] wire [1:0] _dcIF_io_requestor_resp_bits_dprv; // @[LazyRoCC.scala:106:24] wire _dcIF_io_requestor_resp_bits_dv; // @[LazyRoCC.scala:106:24] wire [63:0] _dcIF_io_requestor_resp_bits_data; // @[LazyRoCC.scala:106:24] wire [7:0] _dcIF_io_requestor_resp_bits_mask; // @[LazyRoCC.scala:106:24] wire _dcIF_io_requestor_resp_bits_replay; // @[LazyRoCC.scala:106:24] wire _dcIF_io_requestor_resp_bits_has_data; // @[LazyRoCC.scala:106:24] wire [63:0] _dcIF_io_requestor_resp_bits_data_word_bypass; // @[LazyRoCC.scala:106:24] wire [63:0] _dcIF_io_requestor_resp_bits_data_raw; // @[LazyRoCC.scala:106:24] wire [63:0] _dcIF_io_requestor_resp_bits_store_data; // @[LazyRoCC.scala:106:24] wire _dcIF_io_cache_req_valid; // @[LazyRoCC.scala:106:24] wire [63:0] _dcIF_io_cache_s1_data_data; // @[LazyRoCC.scala:106:24] wire [7:0] _dcIF_io_cache_s1_data_mask; // @[LazyRoCC.scala:106:24] wire _cmdRouter_io_in_ready; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_valid; // @[LazyRoCC.scala:102:27] wire [6:0] _cmdRouter_io_out_0_bits_inst_funct; // @[LazyRoCC.scala:102:27] wire [4:0] _cmdRouter_io_out_0_bits_inst_rs2; // @[LazyRoCC.scala:102:27] wire [4:0] _cmdRouter_io_out_0_bits_inst_rs1; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_inst_xd; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_inst_xs1; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_inst_xs2; // @[LazyRoCC.scala:102:27] wire [4:0] _cmdRouter_io_out_0_bits_inst_rd; // @[LazyRoCC.scala:102:27] wire [6:0] _cmdRouter_io_out_0_bits_inst_opcode; // @[LazyRoCC.scala:102:27] wire [63:0] _cmdRouter_io_out_0_bits_rs1; // @[LazyRoCC.scala:102:27] wire [63:0] _cmdRouter_io_out_0_bits_rs2; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_debug; // @[LazyRoCC.scala:102:27] wire 
_cmdRouter_io_out_0_bits_status_cease; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_wfi; // @[LazyRoCC.scala:102:27] wire [31:0] _cmdRouter_io_out_0_bits_status_isa; // @[LazyRoCC.scala:102:27] wire [1:0] _cmdRouter_io_out_0_bits_status_dprv; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_dv; // @[LazyRoCC.scala:102:27] wire [1:0] _cmdRouter_io_out_0_bits_status_prv; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_v; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_sd; // @[LazyRoCC.scala:102:27] wire [22:0] _cmdRouter_io_out_0_bits_status_zero2; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_mpv; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_gva; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_mbe; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_sbe; // @[LazyRoCC.scala:102:27] wire [1:0] _cmdRouter_io_out_0_bits_status_sxl; // @[LazyRoCC.scala:102:27] wire [1:0] _cmdRouter_io_out_0_bits_status_uxl; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_sd_rv32; // @[LazyRoCC.scala:102:27] wire [7:0] _cmdRouter_io_out_0_bits_status_zero1; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_tsr; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_tw; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_tvm; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_mxr; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_sum; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_mprv; // @[LazyRoCC.scala:102:27] wire [1:0] _cmdRouter_io_out_0_bits_status_xs; // @[LazyRoCC.scala:102:27] wire [1:0] _cmdRouter_io_out_0_bits_status_fs; // @[LazyRoCC.scala:102:27] wire [1:0] _cmdRouter_io_out_0_bits_status_mpp; // @[LazyRoCC.scala:102:27] wire [1:0] _cmdRouter_io_out_0_bits_status_vs; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_spp; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_mpie; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_ube; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_spie; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_upie; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_mie; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_hie; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_sie; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_out_0_bits_status_uie; // @[LazyRoCC.scala:102:27] wire _cmdRouter_io_busy; // @[LazyRoCC.scala:102:27] wire _respArb_io_in_0_ready; // @[LazyRoCC.scala:101:25] wire _respArb_io_out_valid; // @[LazyRoCC.scala:101:25] wire [4:0] _respArb_io_out_bits_rd; // @[LazyRoCC.scala:101:25] wire [63:0] _respArb_io_out_bits_data; // @[LazyRoCC.scala:101:25] wire _ptw_io_requestor_0_req_ready; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_valid; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_ae_ptw; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_ae_final; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pf; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_gf; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_hr; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_hw; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_hx; // @[PTW.scala:802:19] wire [9:0] _ptw_io_requestor_0_resp_bits_pte_reserved_for_future; // @[PTW.scala:802:19] wire [43:0] 
_ptw_io_requestor_0_resp_bits_pte_ppn; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_resp_bits_pte_reserved_for_software; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_d; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_g; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_u; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_r; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_v; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_resp_bits_level; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_homogeneous; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_gpa_valid; // @[PTW.scala:802:19] wire [38:0] _ptw_io_requestor_0_resp_bits_gpa_bits; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_gpa_is_pte; // @[PTW.scala:802:19] wire [3:0] _ptw_io_requestor_0_ptbr_mode; // @[PTW.scala:802:19] wire [43:0] _ptw_io_requestor_0_ptbr_ppn; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_debug; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_cease; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_wfi; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_status_isa; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_status_dprv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_dv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_status_prv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_v; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_mpv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_tsr; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_tw; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_tvm; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_mxr; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_sum; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_mprv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_status_fs; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_status_mpp; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_spp; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_mpie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_spie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_mie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_sie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_hstatus_spvp; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_hstatus_spv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_hstatus_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_debug; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_cease; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_wfi; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_gstatus_isa; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_gstatus_dprv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_dv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_gstatus_prv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_v; // @[PTW.scala:802:19] wire [22:0] _ptw_io_requestor_0_gstatus_zero2; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_mpv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_mbe; // @[PTW.scala:802:19] wire 
_ptw_io_requestor_0_gstatus_sbe; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_gstatus_sxl; // @[PTW.scala:802:19] wire [7:0] _ptw_io_requestor_0_gstatus_zero1; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_tsr; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_tw; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_tvm; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_mxr; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_sum; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_mprv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_gstatus_fs; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_gstatus_mpp; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_gstatus_vs; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_spp; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_mpie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_ube; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_spie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_upie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_mie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_hie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_sie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_uie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_0_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_0_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_0_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_0_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_0_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_0_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_0_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_1_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_1_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_1_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_1_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_1_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_1_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_1_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_2_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_2_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_2_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_2_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_2_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_2_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_2_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_3_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_3_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_3_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_3_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_3_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_3_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_3_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_4_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_4_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_4_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_4_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_4_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_4_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_4_mask; // @[PTW.scala:802:19] wire 
_ptw_io_requestor_0_pmp_5_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_5_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_5_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_5_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_5_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_5_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_5_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_6_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_6_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_6_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_6_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_6_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_6_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_6_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_7_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_7_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_7_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_7_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_7_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_7_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_7_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_0_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_0_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_0_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_0_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_1_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_1_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_1_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_1_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_2_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_2_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_2_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_2_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_3_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_3_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_3_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_3_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_req_ready; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_valid; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_ae_ptw; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_ae_final; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pf; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_gf; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_hr; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_hw; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_hx; // @[PTW.scala:802:19] wire [9:0] _ptw_io_requestor_1_resp_bits_pte_reserved_for_future; // @[PTW.scala:802:19] wire [43:0] _ptw_io_requestor_1_resp_bits_pte_ppn; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_resp_bits_pte_reserved_for_software; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_d; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_g; // @[PTW.scala:802:19] wire 
_ptw_io_requestor_1_resp_bits_pte_u; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_r; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_v; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_resp_bits_level; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_homogeneous; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_gpa_valid; // @[PTW.scala:802:19] wire [38:0] _ptw_io_requestor_1_resp_bits_gpa_bits; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_gpa_is_pte; // @[PTW.scala:802:19] wire [3:0] _ptw_io_requestor_1_ptbr_mode; // @[PTW.scala:802:19] wire [43:0] _ptw_io_requestor_1_ptbr_ppn; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_debug; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_cease; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_wfi; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_status_isa; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_status_dprv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_dv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_status_prv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_v; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_mpv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_tsr; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_tw; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_tvm; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_mxr; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_sum; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_mprv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_status_fs; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_status_mpp; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_spp; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_mpie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_spie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_mie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_sie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_hstatus_spvp; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_hstatus_spv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_hstatus_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_debug; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_cease; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_wfi; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_gstatus_isa; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_gstatus_dprv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_dv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_gstatus_prv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_v; // @[PTW.scala:802:19] wire [22:0] _ptw_io_requestor_1_gstatus_zero2; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_mpv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_mbe; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_sbe; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_gstatus_sxl; // @[PTW.scala:802:19] wire [7:0] _ptw_io_requestor_1_gstatus_zero1; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_tsr; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_tw; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_tvm; // @[PTW.scala:802:19] wire 
_ptw_io_requestor_1_gstatus_mxr; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_sum; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_mprv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_gstatus_fs; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_gstatus_mpp; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_gstatus_vs; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_spp; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_mpie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_ube; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_spie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_upie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_mie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_hie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_sie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_uie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_0_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_0_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_0_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_0_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_0_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_0_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_0_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_1_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_1_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_1_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_1_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_1_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_1_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_1_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_2_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_2_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_2_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_2_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_2_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_2_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_2_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_3_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_3_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_3_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_3_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_3_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_3_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_3_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_4_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_4_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_4_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_4_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_4_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_4_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_4_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_5_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_5_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_5_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_5_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_5_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_5_addr; // @[PTW.scala:802:19] wire [31:0] 
_ptw_io_requestor_1_pmp_5_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_6_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_6_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_6_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_6_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_6_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_6_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_6_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_7_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_7_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_7_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_7_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_7_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_7_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_7_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_0_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_0_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_0_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_0_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_1_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_1_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_1_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_1_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_2_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_2_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_2_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_2_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_3_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_3_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_3_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_3_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_req_ready; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_valid; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_ae_ptw; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_ae_final; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_pf; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_gf; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_hr; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_hw; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_hx; // @[PTW.scala:802:19] wire [9:0] _ptw_io_requestor_2_resp_bits_pte_reserved_for_future; // @[PTW.scala:802:19] wire [43:0] _ptw_io_requestor_2_resp_bits_pte_ppn; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_resp_bits_pte_reserved_for_software; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_pte_d; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_pte_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_pte_g; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_pte_u; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_pte_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_pte_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_pte_r; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_pte_v; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_resp_bits_level; // 
@[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_homogeneous; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_gpa_valid; // @[PTW.scala:802:19] wire [38:0] _ptw_io_requestor_2_resp_bits_gpa_bits; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_resp_bits_gpa_is_pte; // @[PTW.scala:802:19] wire [3:0] _ptw_io_requestor_2_ptbr_mode; // @[PTW.scala:802:19] wire [43:0] _ptw_io_requestor_2_ptbr_ppn; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_debug; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_cease; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_wfi; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_2_status_isa; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_status_dprv; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_dv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_status_prv; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_v; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_mpv; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_tsr; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_tw; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_tvm; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_mxr; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_sum; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_mprv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_status_fs; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_status_mpp; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_spp; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_mpie; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_spie; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_mie; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_status_sie; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_hstatus_spvp; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_hstatus_spv; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_hstatus_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_debug; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_cease; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_wfi; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_2_gstatus_isa; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_gstatus_dprv; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_dv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_gstatus_prv; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_v; // @[PTW.scala:802:19] wire [22:0] _ptw_io_requestor_2_gstatus_zero2; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_mpv; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_mbe; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_sbe; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_gstatus_sxl; // @[PTW.scala:802:19] wire [7:0] _ptw_io_requestor_2_gstatus_zero1; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_tsr; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_tw; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_tvm; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_mxr; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_sum; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_mprv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_gstatus_fs; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_gstatus_mpp; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_gstatus_vs; // 
@[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_spp; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_mpie; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_ube; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_spie; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_upie; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_mie; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_hie; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_sie; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_gstatus_uie; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_0_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_pmp_0_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_0_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_0_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_0_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_2_pmp_0_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_2_pmp_0_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_1_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_pmp_1_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_1_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_1_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_1_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_2_pmp_1_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_2_pmp_1_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_2_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_pmp_2_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_2_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_2_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_2_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_2_pmp_2_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_2_pmp_2_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_3_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_pmp_3_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_3_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_3_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_3_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_2_pmp_3_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_2_pmp_3_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_4_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_pmp_4_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_4_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_4_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_4_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_2_pmp_4_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_2_pmp_4_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_5_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_pmp_5_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_5_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_5_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_5_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_2_pmp_5_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_2_pmp_5_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_6_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_pmp_6_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_6_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_6_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_6_cfg_r; // @[PTW.scala:802:19] 
wire [29:0] _ptw_io_requestor_2_pmp_6_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_2_pmp_6_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_7_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_2_pmp_7_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_7_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_7_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_pmp_7_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_2_pmp_7_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_2_pmp_7_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_customCSRs_csrs_0_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_customCSRs_csrs_0_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_2_customCSRs_csrs_0_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_2_customCSRs_csrs_0_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_customCSRs_csrs_1_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_customCSRs_csrs_1_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_2_customCSRs_csrs_1_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_2_customCSRs_csrs_1_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_customCSRs_csrs_2_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_customCSRs_csrs_2_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_2_customCSRs_csrs_2_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_2_customCSRs_csrs_2_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_customCSRs_csrs_3_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_2_customCSRs_csrs_3_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_2_customCSRs_csrs_3_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_2_customCSRs_csrs_3_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_req_ready; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_valid; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_ae_ptw; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_ae_final; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_pf; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_gf; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_hr; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_hw; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_hx; // @[PTW.scala:802:19] wire [9:0] _ptw_io_requestor_3_resp_bits_pte_reserved_for_future; // @[PTW.scala:802:19] wire [43:0] _ptw_io_requestor_3_resp_bits_pte_ppn; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_resp_bits_pte_reserved_for_software; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_pte_d; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_pte_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_pte_g; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_pte_u; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_pte_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_pte_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_pte_r; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_pte_v; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_resp_bits_level; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_homogeneous; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_gpa_valid; // @[PTW.scala:802:19] wire [38:0] _ptw_io_requestor_3_resp_bits_gpa_bits; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_resp_bits_gpa_is_pte; // @[PTW.scala:802:19] wire [3:0] _ptw_io_requestor_3_ptbr_mode; // @[PTW.scala:802:19] wire 
[43:0] _ptw_io_requestor_3_ptbr_ppn; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_debug; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_cease; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_wfi; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_3_status_isa; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_status_dprv; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_dv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_status_prv; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_v; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_mpv; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_tsr; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_tw; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_tvm; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_mxr; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_sum; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_mprv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_status_fs; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_status_mpp; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_spp; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_mpie; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_spie; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_mie; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_status_sie; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_hstatus_spvp; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_hstatus_spv; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_hstatus_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_debug; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_cease; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_wfi; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_3_gstatus_isa; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_gstatus_dprv; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_dv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_gstatus_prv; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_v; // @[PTW.scala:802:19] wire [22:0] _ptw_io_requestor_3_gstatus_zero2; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_mpv; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_mbe; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_sbe; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_gstatus_sxl; // @[PTW.scala:802:19] wire [7:0] _ptw_io_requestor_3_gstatus_zero1; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_tsr; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_tw; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_tvm; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_mxr; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_sum; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_mprv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_gstatus_fs; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_gstatus_mpp; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_gstatus_vs; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_spp; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_mpie; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_ube; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_spie; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_upie; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_mie; // @[PTW.scala:802:19] 
wire _ptw_io_requestor_3_gstatus_hie; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_sie; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_gstatus_uie; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_0_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_pmp_0_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_0_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_0_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_0_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_3_pmp_0_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_3_pmp_0_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_1_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_pmp_1_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_1_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_1_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_1_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_3_pmp_1_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_3_pmp_1_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_2_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_pmp_2_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_2_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_2_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_2_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_3_pmp_2_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_3_pmp_2_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_3_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_pmp_3_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_3_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_3_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_3_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_3_pmp_3_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_3_pmp_3_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_4_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_pmp_4_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_4_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_4_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_4_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_3_pmp_4_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_3_pmp_4_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_5_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_pmp_5_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_5_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_5_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_5_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_3_pmp_5_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_3_pmp_5_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_6_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_pmp_6_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_6_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_6_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_6_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_3_pmp_6_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_3_pmp_6_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_7_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_3_pmp_7_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_7_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_pmp_7_cfg_w; // @[PTW.scala:802:19] wire 
_ptw_io_requestor_3_pmp_7_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_3_pmp_7_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_3_pmp_7_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_customCSRs_csrs_0_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_customCSRs_csrs_0_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_3_customCSRs_csrs_0_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_3_customCSRs_csrs_0_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_customCSRs_csrs_1_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_customCSRs_csrs_1_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_3_customCSRs_csrs_1_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_3_customCSRs_csrs_1_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_customCSRs_csrs_2_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_customCSRs_csrs_2_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_3_customCSRs_csrs_2_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_3_customCSRs_csrs_2_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_customCSRs_csrs_3_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_3_customCSRs_csrs_3_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_3_customCSRs_csrs_3_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_3_customCSRs_csrs_3_value; // @[PTW.scala:802:19] wire _ptw_io_mem_req_valid; // @[PTW.scala:802:19] wire [39:0] _ptw_io_mem_req_bits_addr; // @[PTW.scala:802:19] wire _ptw_io_mem_req_bits_dv; // @[PTW.scala:802:19] wire _ptw_io_mem_s1_kill; // @[PTW.scala:802:19] wire _ptw_io_dpath_perf_pte_miss; // @[PTW.scala:802:19] wire _ptw_io_dpath_perf_pte_hit; // @[PTW.scala:802:19] wire _ptw_io_dpath_clock_enabled; // @[PTW.scala:802:19] wire _dcacheArb_io_requestor_0_req_ready; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_nack; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_nack_cause_raw; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_uncached; // @[HellaCache.scala:292:25] wire [31:0] _dcacheArb_io_requestor_0_s2_paddr; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_resp_valid; // @[HellaCache.scala:292:25] wire [39:0] _dcacheArb_io_requestor_0_resp_bits_addr; // @[HellaCache.scala:292:25] wire [7:0] _dcacheArb_io_requestor_0_resp_bits_tag; // @[HellaCache.scala:292:25] wire [4:0] _dcacheArb_io_requestor_0_resp_bits_cmd; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_requestor_0_resp_bits_size; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_resp_bits_signed; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_requestor_0_resp_bits_dprv; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_resp_bits_dv; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data; // @[HellaCache.scala:292:25] wire [7:0] _dcacheArb_io_requestor_0_resp_bits_mask; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_resp_bits_replay; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_resp_bits_has_data; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data_word_bypass; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data_raw; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_0_resp_bits_store_data; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_replay_next; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_xcpt_ma_ld; // @[HellaCache.scala:292:25] wire 
_dcacheArb_io_requestor_0_s2_xcpt_ma_st; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_xcpt_pf_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_xcpt_pf_st; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_xcpt_ae_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_xcpt_ae_st; // @[HellaCache.scala:292:25] wire [39:0] _dcacheArb_io_requestor_0_s2_gpa; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_ordered; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_store_pending; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_acquire; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_release; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_grant; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_tlbMiss; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_blocked; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_req_ready; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_nack; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_nack_cause_raw; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_uncached; // @[HellaCache.scala:292:25] wire [31:0] _dcacheArb_io_requestor_1_s2_paddr; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_resp_valid; // @[HellaCache.scala:292:25] wire [39:0] _dcacheArb_io_requestor_1_resp_bits_addr; // @[HellaCache.scala:292:25] wire [7:0] _dcacheArb_io_requestor_1_resp_bits_tag; // @[HellaCache.scala:292:25] wire [4:0] _dcacheArb_io_requestor_1_resp_bits_cmd; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_requestor_1_resp_bits_size; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_resp_bits_signed; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_requestor_1_resp_bits_dprv; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_resp_bits_dv; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data; // @[HellaCache.scala:292:25] wire [7:0] _dcacheArb_io_requestor_1_resp_bits_mask; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_resp_bits_replay; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_resp_bits_has_data; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data_word_bypass; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data_raw; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_1_resp_bits_store_data; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_replay_next; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_xcpt_ma_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_xcpt_ma_st; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_xcpt_pf_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_xcpt_pf_st; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_xcpt_ae_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_xcpt_ae_st; // 
@[HellaCache.scala:292:25] wire [39:0] _dcacheArb_io_requestor_1_s2_gpa; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_ordered; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_store_pending; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_acquire; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_release; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_grant; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_tlbMiss; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_blocked; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_req_ready; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_s2_nack; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_s2_nack_cause_raw; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_s2_uncached; // @[HellaCache.scala:292:25] wire [31:0] _dcacheArb_io_requestor_2_s2_paddr; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_resp_valid; // @[HellaCache.scala:292:25] wire [39:0] _dcacheArb_io_requestor_2_resp_bits_addr; // @[HellaCache.scala:292:25] wire [7:0] _dcacheArb_io_requestor_2_resp_bits_tag; // @[HellaCache.scala:292:25] wire [4:0] _dcacheArb_io_requestor_2_resp_bits_cmd; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_requestor_2_resp_bits_size; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_resp_bits_signed; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_requestor_2_resp_bits_dprv; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_resp_bits_dv; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_2_resp_bits_data; // @[HellaCache.scala:292:25] wire [7:0] _dcacheArb_io_requestor_2_resp_bits_mask; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_resp_bits_replay; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_resp_bits_has_data; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_2_resp_bits_data_word_bypass; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_2_resp_bits_data_raw; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_2_resp_bits_store_data; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_replay_next; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_s2_xcpt_ma_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_s2_xcpt_ma_st; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_s2_xcpt_pf_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_s2_xcpt_pf_st; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_s2_xcpt_ae_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_s2_xcpt_ae_st; // @[HellaCache.scala:292:25] wire [39:0] _dcacheArb_io_requestor_2_s2_gpa; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_ordered; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_store_pending; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_perf_acquire; // @[HellaCache.scala:292:25] wire 
_dcacheArb_io_requestor_2_perf_release; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_perf_grant; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_perf_tlbMiss; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_perf_blocked; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_perf_canAcceptStoreThenLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_perf_canAcceptStoreThenRMW; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_perf_canAcceptLoadThenLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_perf_storeBufferEmptyAfterLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_2_perf_storeBufferEmptyAfterStore; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_req_valid; // @[HellaCache.scala:292:25] wire [39:0] _dcacheArb_io_mem_req_bits_addr; // @[HellaCache.scala:292:25] wire [7:0] _dcacheArb_io_mem_req_bits_tag; // @[HellaCache.scala:292:25] wire [4:0] _dcacheArb_io_mem_req_bits_cmd; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_mem_req_bits_size; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_req_bits_signed; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_mem_req_bits_dprv; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_req_bits_dv; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_req_bits_phys; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_req_bits_no_resp; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_s1_kill; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_mem_s1_data_data; // @[HellaCache.scala:292:25] wire [7:0] _dcacheArb_io_mem_s1_data_mask; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_keep_clock_enabled; // @[HellaCache.scala:292:25] wire _fpuOpt_io_fcsr_flags_valid; // @[RocketTile.scala:242:62] wire [4:0] _fpuOpt_io_fcsr_flags_bits; // @[RocketTile.scala:242:62] wire [63:0] _fpuOpt_io_store_data; // @[RocketTile.scala:242:62] wire [63:0] _fpuOpt_io_toint_data; // @[RocketTile.scala:242:62] wire _fpuOpt_io_fcsr_rdy; // @[RocketTile.scala:242:62] wire _fpuOpt_io_nack_mem; // @[RocketTile.scala:242:62] wire _fpuOpt_io_illegal_rm; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_ldst; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_wen; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_ren1; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_ren2; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_ren3; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_swap12; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_swap23; // @[RocketTile.scala:242:62] wire [1:0] _fpuOpt_io_dec_typeTagIn; // @[RocketTile.scala:242:62] wire [1:0] _fpuOpt_io_dec_typeTagOut; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_fromint; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_toint; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_fastpipe; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_fma; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_div; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_sqrt; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_wflags; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_vec; // @[RocketTile.scala:242:62] wire _fpuOpt_io_sboard_set; // @[RocketTile.scala:242:62] wire _fpuOpt_io_sboard_clr; // @[RocketTile.scala:242:62] wire [4:0] _fpuOpt_io_sboard_clra; // @[RocketTile.scala:242:62] wire _rerocc_buffer_auto_in_req_ready; // @[Protocol.scala:134:35] wire _rerocc_buffer_auto_in_resp_valid; // @[Protocol.scala:134:35] wire [2:0] _rerocc_buffer_auto_in_resp_bits_opcode; // @[Protocol.scala:134:35] 
wire [3:0] _rerocc_buffer_auto_in_resp_bits_client_id; // @[Protocol.scala:134:35] wire [2:0] _rerocc_buffer_auto_in_resp_bits_manager_id; // @[Protocol.scala:134:35] wire [63:0] _rerocc_buffer_auto_in_resp_bits_data; // @[Protocol.scala:134:35] wire _frontend_io_cpu_resp_valid; // @[Frontend.scala:393:28] wire [1:0] _frontend_io_cpu_resp_bits_btb_cfiType; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_btb_taken; // @[Frontend.scala:393:28] wire [1:0] _frontend_io_cpu_resp_bits_btb_mask; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_btb_bridx; // @[Frontend.scala:393:28] wire [38:0] _frontend_io_cpu_resp_bits_btb_target; // @[Frontend.scala:393:28] wire [4:0] _frontend_io_cpu_resp_bits_btb_entry; // @[Frontend.scala:393:28] wire [7:0] _frontend_io_cpu_resp_bits_btb_bht_history; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_btb_bht_value; // @[Frontend.scala:393:28] wire [39:0] _frontend_io_cpu_resp_bits_pc; // @[Frontend.scala:393:28] wire [31:0] _frontend_io_cpu_resp_bits_data; // @[Frontend.scala:393:28] wire [1:0] _frontend_io_cpu_resp_bits_mask; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_xcpt_pf_inst; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_xcpt_gf_inst; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_xcpt_ae_inst; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_replay; // @[Frontend.scala:393:28] wire _frontend_io_cpu_gpa_valid; // @[Frontend.scala:393:28] wire [39:0] _frontend_io_cpu_gpa_bits; // @[Frontend.scala:393:28] wire _frontend_io_cpu_gpa_is_pte; // @[Frontend.scala:393:28] wire [39:0] _frontend_io_cpu_npc; // @[Frontend.scala:393:28] wire _frontend_io_cpu_perf_acquire; // @[Frontend.scala:393:28] wire _frontend_io_cpu_perf_tlbMiss; // @[Frontend.scala:393:28] wire _frontend_io_ptw_req_valid; // @[Frontend.scala:393:28] wire _frontend_io_ptw_req_bits_valid; // @[Frontend.scala:393:28] wire [26:0] _frontend_io_ptw_req_bits_bits_addr; // @[Frontend.scala:393:28] wire _frontend_io_ptw_req_bits_bits_need_gpa; // @[Frontend.scala:393:28] wire _rerocc_client_auto_re_ro_cc_out_req_valid; // @[Configs.scala:14:35] wire [2:0] _rerocc_client_auto_re_ro_cc_out_req_bits_opcode; // @[Configs.scala:14:35] wire [3:0] _rerocc_client_auto_re_ro_cc_out_req_bits_client_id; // @[Configs.scala:14:35] wire [2:0] _rerocc_client_auto_re_ro_cc_out_req_bits_manager_id; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_auto_re_ro_cc_out_req_bits_data; // @[Configs.scala:14:35] wire _rerocc_client_auto_re_ro_cc_out_resp_ready; // @[Configs.scala:14:35] wire _rerocc_client_io_cmd_ready; // @[Configs.scala:14:35] wire _rerocc_client_io_resp_valid; // @[Configs.scala:14:35] wire [4:0] _rerocc_client_io_resp_bits_rd; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_resp_bits_data; // @[Configs.scala:14:35] wire _rerocc_client_io_busy; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_0_sdata; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_1_sdata; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_2_sdata; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_3_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_5_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_5_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_6_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_6_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_7_stall; // @[Configs.scala:14:35] wire [63:0] 
_rerocc_client_io_csrs_7_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_8_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_8_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_9_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_9_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_10_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_10_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_11_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_11_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_12_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_12_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_13_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_13_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_14_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_14_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_15_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_15_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_16_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_16_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_17_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_17_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_18_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_18_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_19_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_19_sdata; // @[Configs.scala:14:35] wire _rerocc_client_io_csrs_20_stall; // @[Configs.scala:14:35] wire [63:0] _rerocc_client_io_csrs_20_sdata; // @[Configs.scala:14:35] wire _dcache_io_cpu_req_ready; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_nack; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_nack_cause_raw; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_uncached; // @[HellaCache.scala:278:43] wire [31:0] _dcache_io_cpu_s2_paddr; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_resp_valid; // @[HellaCache.scala:278:43] wire [39:0] _dcache_io_cpu_resp_bits_addr; // @[HellaCache.scala:278:43] wire [7:0] _dcache_io_cpu_resp_bits_tag; // @[HellaCache.scala:278:43] wire [4:0] _dcache_io_cpu_resp_bits_cmd; // @[HellaCache.scala:278:43] wire [1:0] _dcache_io_cpu_resp_bits_size; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_resp_bits_signed; // @[HellaCache.scala:278:43] wire [1:0] _dcache_io_cpu_resp_bits_dprv; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_resp_bits_dv; // @[HellaCache.scala:278:43] wire [63:0] _dcache_io_cpu_resp_bits_data; // @[HellaCache.scala:278:43] wire [7:0] _dcache_io_cpu_resp_bits_mask; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_resp_bits_replay; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_resp_bits_has_data; // @[HellaCache.scala:278:43] wire [63:0] _dcache_io_cpu_resp_bits_data_word_bypass; // @[HellaCache.scala:278:43] wire [63:0] _dcache_io_cpu_resp_bits_data_raw; // @[HellaCache.scala:278:43] wire [63:0] _dcache_io_cpu_resp_bits_store_data; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_replay_next; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_xcpt_ma_ld; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_xcpt_ma_st; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_xcpt_pf_ld; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_xcpt_pf_st; // @[HellaCache.scala:278:43] wire 
_dcache_io_cpu_s2_xcpt_ae_ld; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_xcpt_ae_st; // @[HellaCache.scala:278:43] wire [39:0] _dcache_io_cpu_s2_gpa; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_ordered; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_store_pending; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_acquire; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_release; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_grant; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_tlbMiss; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_blocked; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_canAcceptStoreThenLoad; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_canAcceptStoreThenRMW; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_canAcceptLoadThenLoad; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_storeBufferEmptyAfterLoad; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_storeBufferEmptyAfterStore; // @[HellaCache.scala:278:43] wire _dcache_io_ptw_req_valid; // @[HellaCache.scala:278:43] wire [26:0] _dcache_io_ptw_req_bits_bits_addr; // @[HellaCache.scala:278:43] wire _dcache_io_ptw_req_bits_bits_need_gpa; // @[HellaCache.scala:278:43] wire auto_rerocc_buffer_out_req_ready_0 = auto_rerocc_buffer_out_req_ready; // @[RocketTile.scala:141:7] wire auto_rerocc_buffer_out_resp_valid_0 = auto_rerocc_buffer_out_resp_valid; // @[RocketTile.scala:141:7] wire [2:0] auto_rerocc_buffer_out_resp_bits_opcode_0 = auto_rerocc_buffer_out_resp_bits_opcode; // @[RocketTile.scala:141:7] wire [3:0] auto_rerocc_buffer_out_resp_bits_client_id_0 = auto_rerocc_buffer_out_resp_bits_client_id; // @[RocketTile.scala:141:7] wire [2:0] auto_rerocc_buffer_out_resp_bits_manager_id_0 = auto_rerocc_buffer_out_resp_bits_manager_id; // @[RocketTile.scala:141:7] wire [63:0] auto_rerocc_buffer_out_resp_bits_data_0 = auto_rerocc_buffer_out_resp_bits_data; // @[RocketTile.scala:141:7] wire auto_buffer_out_a_ready_0 = auto_buffer_out_a_ready; // @[RocketTile.scala:141:7] wire auto_buffer_out_b_valid_0 = auto_buffer_out_b_valid; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_b_bits_opcode_0 = auto_buffer_out_b_bits_opcode; // @[RocketTile.scala:141:7] wire [1:0] auto_buffer_out_b_bits_param_0 = auto_buffer_out_b_bits_param; // @[RocketTile.scala:141:7] wire [3:0] auto_buffer_out_b_bits_size_0 = auto_buffer_out_b_bits_size; // @[RocketTile.scala:141:7] wire [1:0] auto_buffer_out_b_bits_source_0 = auto_buffer_out_b_bits_source; // @[RocketTile.scala:141:7] wire [31:0] auto_buffer_out_b_bits_address_0 = auto_buffer_out_b_bits_address; // @[RocketTile.scala:141:7] wire [7:0] auto_buffer_out_b_bits_mask_0 = auto_buffer_out_b_bits_mask; // @[RocketTile.scala:141:7] wire [63:0] auto_buffer_out_b_bits_data_0 = auto_buffer_out_b_bits_data; // @[RocketTile.scala:141:7] wire auto_buffer_out_b_bits_corrupt_0 = auto_buffer_out_b_bits_corrupt; // @[RocketTile.scala:141:7] wire auto_buffer_out_c_ready_0 = auto_buffer_out_c_ready; // @[RocketTile.scala:141:7] wire auto_buffer_out_d_valid_0 = auto_buffer_out_d_valid; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_d_bits_opcode_0 = auto_buffer_out_d_bits_opcode; // @[RocketTile.scala:141:7] wire [1:0] auto_buffer_out_d_bits_param_0 = auto_buffer_out_d_bits_param; // @[RocketTile.scala:141:7] wire [3:0] auto_buffer_out_d_bits_size_0 = auto_buffer_out_d_bits_size; // @[RocketTile.scala:141:7] wire [1:0] auto_buffer_out_d_bits_source_0 = auto_buffer_out_d_bits_source; // @[RocketTile.scala:141:7] 
wire [2:0] auto_buffer_out_d_bits_sink_0 = auto_buffer_out_d_bits_sink; // @[RocketTile.scala:141:7] wire auto_buffer_out_d_bits_denied_0 = auto_buffer_out_d_bits_denied; // @[RocketTile.scala:141:7] wire [63:0] auto_buffer_out_d_bits_data_0 = auto_buffer_out_d_bits_data; // @[RocketTile.scala:141:7] wire auto_buffer_out_d_bits_corrupt_0 = auto_buffer_out_d_bits_corrupt; // @[RocketTile.scala:141:7] wire auto_buffer_out_e_ready_0 = auto_buffer_out_e_ready; // @[RocketTile.scala:141:7] wire auto_int_local_in_3_0_0 = auto_int_local_in_3_0; // @[RocketTile.scala:141:7] wire auto_int_local_in_2_0_0 = auto_int_local_in_2_0; // @[RocketTile.scala:141:7] wire auto_int_local_in_1_0_0 = auto_int_local_in_1_0; // @[RocketTile.scala:141:7] wire auto_int_local_in_1_1_0 = auto_int_local_in_1_1; // @[RocketTile.scala:141:7] wire auto_int_local_in_0_0_0 = auto_int_local_in_0_0; // @[RocketTile.scala:141:7] wire auto_hartid_in_0 = auto_hartid_in; // @[RocketTile.scala:141:7] wire auto_buffer_out_a_bits_corrupt = 1'h0; // @[RocketTile.scala:141:7] wire auto_buffer_out_c_bits_corrupt = 1'h0; // @[RocketTile.scala:141:7] wire auto_cease_out_0 = 1'h0; // @[RocketTile.scala:141:7] wire auto_halt_out_0 = 1'h0; // @[RocketTile.scala:141:7] wire auto_trace_core_source_out_group_0_iretire = 1'h0; // @[RocketTile.scala:141:7] wire auto_trace_core_source_out_group_0_ilastsize = 1'h0; // @[RocketTile.scala:141:7] wire broadcast_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire broadcast_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire broadcast__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire broadcast_1_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire broadcast_1_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire broadcast_1__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire nexus_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire nexus_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire nexus__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire nexus_1_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire nexus_1_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire nexus_1__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire nexus_1_x1_bundleOut_x_sourceOpt_enable = 1'h0; // @[BaseTile.scala:305:19] wire nexus_1_x1_bundleOut_x_sourceOpt_stall = 1'h0; // @[BaseTile.scala:305:19] wire nexus_1_nodeOut_enable = 1'h0; // @[MixedNode.scala:542:17] wire nexus_1_nodeOut_stall = 1'h0; // @[MixedNode.scala:542:17] wire nexus_1_defaultWireOpt_enable = 1'h0; // @[BaseTile.scala:305:19] wire nexus_1_defaultWireOpt_stall = 1'h0; // @[BaseTile.scala:305:19] wire broadcast_2_auto_in_0_rvalid_0 = 1'h0; // @[BundleBridgeNexus.scala:20:9] wire broadcast_2_auto_in_0_wvalid_0 = 1'h0; // @[BundleBridgeNexus.scala:20:9] wire broadcast_2_auto_in_0_ivalid_0 = 1'h0; // @[BundleBridgeNexus.scala:20:9] wire broadcast_2_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire broadcast_2_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire broadcast_2__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire broadcast_2_nodeIn_0_rvalid_0 = 1'h0; // @[MixedNode.scala:551:17] wire broadcast_2_nodeIn_0_wvalid_0 = 1'h0; // @[MixedNode.scala:551:17] wire broadcast_2_nodeIn_0_ivalid_0 = 1'h0; // @[MixedNode.scala:551:17] wire widget_auto_anon_in_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_c_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9] wire 
widget_auto_anon_out_c_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9] wire widget_anonOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire widget_anonOut_c_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire widget_anonIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire widget_anonIn_c_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire widget_1_auto_anon_in_a_bits_source = 1'h0; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_d_bits_source = 1'h0; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_a_bits_source = 1'h0; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_d_bits_source = 1'h0; // @[WidthWidget.scala:27:9] wire widget_1_anonOut_a_bits_source = 1'h0; // @[MixedNode.scala:542:17] wire widget_1_anonOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire widget_1_anonOut_d_bits_source = 1'h0; // @[MixedNode.scala:542:17] wire widget_1_anonIn_a_bits_source = 1'h0; // @[MixedNode.scala:551:17] wire widget_1_anonIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire widget_1_anonIn_d_bits_source = 1'h0; // @[MixedNode.scala:551:17] wire buffer_auto_in_a_bits_corrupt = 1'h0; // @[Buffer.scala:40:9] wire buffer_auto_in_c_bits_corrupt = 1'h0; // @[Buffer.scala:40:9] wire buffer_auto_out_a_bits_corrupt = 1'h0; // @[Buffer.scala:40:9] wire buffer_auto_out_c_bits_corrupt = 1'h0; // @[Buffer.scala:40:9] wire buffer_nodeOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire buffer_nodeOut_c_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire buffer_nodeIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire buffer_nodeIn_c_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire tlOtherMastersNodeOut_c_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire tlOtherMastersNodeIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeIn_c_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire traceCoreSourceNodeOut_group_0_iretire = 1'h0; // @[MixedNode.scala:542:17] wire traceCoreSourceNodeOut_group_0_ilastsize = 1'h0; // @[MixedNode.scala:542:17] wire bundleIn_x_sourceOpt_enable = 1'h0; // @[BaseTile.scala:305:19] wire bundleIn_x_sourceOpt_stall = 1'h0; // @[BaseTile.scala:305:19] wire traceAuxSinkNodeIn_enable = 1'h0; // @[MixedNode.scala:551:17] wire traceAuxSinkNodeIn_stall = 1'h0; // @[MixedNode.scala:551:17] wire bpwatchSourceNodeOut_0_rvalid_0 = 1'h0; // @[MixedNode.scala:542:17] wire bpwatchSourceNodeOut_0_wvalid_0 = 1'h0; // @[MixedNode.scala:542:17] wire bpwatchSourceNodeOut_0_ivalid_0 = 1'h0; // @[MixedNode.scala:542:17] wire haltNodeOut_0 = 1'h0; // @[MixedNode.scala:542:17] wire ceaseNodeOut_0 = 1'h0; // @[MixedNode.scala:542:17] wire [2:0] widget_1_auto_anon_in_a_bits_opcode = 3'h4; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_out_a_bits_opcode = 3'h4; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_anonOut_a_bits_opcode = 3'h4; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_anonIn_a_bits_opcode = 3'h4; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_auto_anon_in_a_bits_size = 4'h6; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_auto_anon_out_a_bits_size = 4'h6; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_anonOut_a_bits_size = 4'h6; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_anonIn_a_bits_size = 4'h6; // @[WidthWidget.scala:27:9] 
wire [7:0] widget_1_auto_anon_in_a_bits_mask = 8'hFF; // @[WidthWidget.scala:27:9] wire [7:0] widget_1_auto_anon_out_a_bits_mask = 8'hFF; // @[WidthWidget.scala:27:9] wire [7:0] widget_1_anonOut_a_bits_mask = 8'hFF; // @[WidthWidget.scala:27:9] wire [7:0] widget_1_anonIn_a_bits_mask = 8'hFF; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_in_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_out_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_anonOut_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_anonIn_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_auto_anon_in_a_bits_data = 64'h0; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_auto_anon_out_a_bits_data = 64'h0; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_anonOut_a_bits_data = 64'h0; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_anonIn_a_bits_data = 64'h0; // @[WidthWidget.scala:27:9] wire [31:0] auto_reset_vector_in = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] broadcast_1_auto_in = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] broadcast_1_auto_out_1 = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] broadcast_1_auto_out_0 = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] broadcast_1_nodeIn = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] broadcast_1_nodeOut = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] broadcast_1_x1_nodeOut = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] resetVectorSinkNodeIn = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] reset_vectorOut = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] reset_vectorIn = 32'h10000; // @[RocketTile.scala:141:7] wire [3:0] auto_trace_core_source_out_group_0_itype = 4'h0; // @[RocketTile.scala:141:7] wire [3:0] auto_trace_core_source_out_priv = 4'h0; // @[RocketTile.scala:141:7] wire [3:0] traceCoreSourceNodeOut_group_0_itype = 4'h0; // @[MixedNode.scala:542:17] wire [3:0] traceCoreSourceNodeOut_priv = 4'h0; // @[MixedNode.scala:542:17] wire [31:0] auto_trace_core_source_out_group_0_iaddr = 32'h0; // @[RocketTile.scala:141:7] wire [31:0] auto_trace_core_source_out_tval = 32'h0; // @[RocketTile.scala:141:7] wire [31:0] auto_trace_core_source_out_cause = 32'h0; // @[RocketTile.scala:141:7] wire [31:0] traceCoreSourceNodeOut_group_0_iaddr = 32'h0; // @[MixedNode.scala:542:17] wire [31:0] traceCoreSourceNodeOut_tval = 32'h0; // @[MixedNode.scala:542:17] wire [31:0] traceCoreSourceNodeOut_cause = 32'h0; // @[MixedNode.scala:542:17] wire widget_1_auto_anon_in_d_ready = 1'h1; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_d_ready = 1'h1; // @[WidthWidget.scala:27:9] wire widget_1_anonOut_d_ready = 1'h1; // @[WidthWidget.scala:27:9] wire widget_1_anonIn_d_ready = 1'h1; // @[WidthWidget.scala:27:9] wire buffer_auto_out_a_ready = auto_buffer_out_a_ready_0; // @[Buffer.scala:40:9] wire buffer_auto_out_a_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_a_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_a_bits_param; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_out_a_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_out_a_bits_source; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_out_a_bits_address; // @[Buffer.scala:40:9] wire [7:0] buffer_auto_out_a_bits_mask; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_out_a_bits_data; // @[Buffer.scala:40:9] wire buffer_auto_out_b_ready; // @[Buffer.scala:40:9] wire buffer_auto_out_b_valid = auto_buffer_out_b_valid_0; // @[Buffer.scala:40:9] wire 
[2:0] buffer_auto_out_b_bits_opcode = auto_buffer_out_b_bits_opcode_0; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_out_b_bits_param = auto_buffer_out_b_bits_param_0; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_out_b_bits_size = auto_buffer_out_b_bits_size_0; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_out_b_bits_source = auto_buffer_out_b_bits_source_0; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_out_b_bits_address = auto_buffer_out_b_bits_address_0; // @[Buffer.scala:40:9] wire [7:0] buffer_auto_out_b_bits_mask = auto_buffer_out_b_bits_mask_0; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_out_b_bits_data = auto_buffer_out_b_bits_data_0; // @[Buffer.scala:40:9] wire buffer_auto_out_b_bits_corrupt = auto_buffer_out_b_bits_corrupt_0; // @[Buffer.scala:40:9] wire buffer_auto_out_c_ready = auto_buffer_out_c_ready_0; // @[Buffer.scala:40:9] wire buffer_auto_out_c_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_c_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_c_bits_param; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_out_c_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_out_c_bits_source; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_out_c_bits_address; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_out_c_bits_data; // @[Buffer.scala:40:9] wire buffer_auto_out_d_ready; // @[Buffer.scala:40:9] wire buffer_auto_out_d_valid = auto_buffer_out_d_valid_0; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_d_bits_opcode = auto_buffer_out_d_bits_opcode_0; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_out_d_bits_param = auto_buffer_out_d_bits_param_0; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_out_d_bits_size = auto_buffer_out_d_bits_size_0; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_out_d_bits_source = auto_buffer_out_d_bits_source_0; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_d_bits_sink = auto_buffer_out_d_bits_sink_0; // @[Buffer.scala:40:9] wire buffer_auto_out_d_bits_denied = auto_buffer_out_d_bits_denied_0; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_out_d_bits_data = auto_buffer_out_d_bits_data_0; // @[Buffer.scala:40:9] wire buffer_auto_out_d_bits_corrupt = auto_buffer_out_d_bits_corrupt_0; // @[Buffer.scala:40:9] wire buffer_auto_out_e_ready = auto_buffer_out_e_ready_0; // @[Buffer.scala:40:9] wire buffer_auto_out_e_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_e_bits_sink; // @[Buffer.scala:40:9] wire wfiNodeOut_0; // @[MixedNode.scala:542:17] wire x1_int_localIn_2_0 = auto_int_local_in_3_0_0; // @[RocketTile.scala:141:7] wire x1_int_localIn_1_0 = auto_int_local_in_2_0_0; // @[RocketTile.scala:141:7] wire x1_int_localIn_0 = auto_int_local_in_1_0_0; // @[RocketTile.scala:141:7] wire x1_int_localIn_1 = auto_int_local_in_1_1_0; // @[RocketTile.scala:141:7] wire int_localIn_0 = auto_int_local_in_0_0_0; // @[RocketTile.scala:141:7] wire traceSourceNodeOut_insns_0_valid; // @[MixedNode.scala:542:17] wire [39:0] traceSourceNodeOut_insns_0_iaddr; // @[MixedNode.scala:542:17] wire [31:0] traceSourceNodeOut_insns_0_insn; // @[MixedNode.scala:542:17] wire [2:0] traceSourceNodeOut_insns_0_priv; // @[MixedNode.scala:542:17] wire traceSourceNodeOut_insns_0_exception; // @[MixedNode.scala:542:17] wire traceSourceNodeOut_insns_0_interrupt; // @[MixedNode.scala:542:17] wire [63:0] traceSourceNodeOut_insns_0_cause; // @[MixedNode.scala:542:17] wire [39:0] traceSourceNodeOut_insns_0_tval; // @[MixedNode.scala:542:17] wire [63:0] traceSourceNodeOut_time; // @[MixedNode.scala:542:17] wire hartidIn = auto_hartid_in_0; // 
@[RocketTile.scala:141:7] wire [2:0] auto_rerocc_buffer_out_req_bits_opcode_0; // @[RocketTile.scala:141:7] wire [3:0] auto_rerocc_buffer_out_req_bits_client_id_0; // @[RocketTile.scala:141:7] wire [2:0] auto_rerocc_buffer_out_req_bits_manager_id_0; // @[RocketTile.scala:141:7] wire [63:0] auto_rerocc_buffer_out_req_bits_data_0; // @[RocketTile.scala:141:7] wire auto_rerocc_buffer_out_req_valid_0; // @[RocketTile.scala:141:7] wire auto_rerocc_buffer_out_resp_ready_0; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_a_bits_opcode_0; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_a_bits_param_0; // @[RocketTile.scala:141:7] wire [3:0] auto_buffer_out_a_bits_size_0; // @[RocketTile.scala:141:7] wire [1:0] auto_buffer_out_a_bits_source_0; // @[RocketTile.scala:141:7] wire [31:0] auto_buffer_out_a_bits_address_0; // @[RocketTile.scala:141:7] wire [7:0] auto_buffer_out_a_bits_mask_0; // @[RocketTile.scala:141:7] wire [63:0] auto_buffer_out_a_bits_data_0; // @[RocketTile.scala:141:7] wire auto_buffer_out_a_valid_0; // @[RocketTile.scala:141:7] wire auto_buffer_out_b_ready_0; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_c_bits_opcode_0; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_c_bits_param_0; // @[RocketTile.scala:141:7] wire [3:0] auto_buffer_out_c_bits_size_0; // @[RocketTile.scala:141:7] wire [1:0] auto_buffer_out_c_bits_source_0; // @[RocketTile.scala:141:7] wire [31:0] auto_buffer_out_c_bits_address_0; // @[RocketTile.scala:141:7] wire [63:0] auto_buffer_out_c_bits_data_0; // @[RocketTile.scala:141:7] wire auto_buffer_out_c_valid_0; // @[RocketTile.scala:141:7] wire auto_buffer_out_d_ready_0; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_e_bits_sink_0; // @[RocketTile.scala:141:7] wire auto_buffer_out_e_valid_0; // @[RocketTile.scala:141:7] wire auto_wfi_out_0_0; // @[RocketTile.scala:141:7] wire auto_trace_source_out_insns_0_valid_0; // @[RocketTile.scala:141:7] wire [39:0] auto_trace_source_out_insns_0_iaddr_0; // @[RocketTile.scala:141:7] wire [31:0] auto_trace_source_out_insns_0_insn_0; // @[RocketTile.scala:141:7] wire [2:0] auto_trace_source_out_insns_0_priv_0; // @[RocketTile.scala:141:7] wire auto_trace_source_out_insns_0_exception_0; // @[RocketTile.scala:141:7] wire auto_trace_source_out_insns_0_interrupt_0; // @[RocketTile.scala:141:7] wire [63:0] auto_trace_source_out_insns_0_cause_0; // @[RocketTile.scala:141:7] wire [39:0] auto_trace_source_out_insns_0_tval_0; // @[RocketTile.scala:141:7] wire [63:0] auto_trace_source_out_time_0; // @[RocketTile.scala:141:7] wire hartidOut; // @[MixedNode.scala:542:17] wire broadcast_nodeIn = broadcast_auto_in; // @[MixedNode.scala:551:17] wire broadcast_nodeOut; // @[MixedNode.scala:542:17] wire broadcast_auto_out; // @[BundleBridgeNexus.scala:20:9] wire hartIdSinkNodeIn = broadcast_auto_out; // @[MixedNode.scala:551:17] assign broadcast_nodeOut = broadcast_nodeIn; // @[MixedNode.scala:542:17, :551:17] assign broadcast_auto_out = broadcast_nodeOut; // @[MixedNode.scala:542:17] wire bpwatchSourceNodeOut_0_valid_0; // @[MixedNode.scala:542:17] wire broadcast_2_nodeIn_0_valid_0 = broadcast_2_auto_in_0_valid_0; // @[MixedNode.scala:551:17] wire [2:0] bpwatchSourceNodeOut_0_action; // @[MixedNode.scala:542:17] wire [2:0] broadcast_2_nodeIn_0_action = broadcast_2_auto_in_0_action; // @[MixedNode.scala:551:17] wire widget_anonIn_a_ready; // @[MixedNode.scala:551:17] wire widget_anonIn_a_valid = widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonIn_a_bits_opcode = 
widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonIn_a_bits_param = widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_anonIn_a_bits_size = widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9] wire widget_anonIn_a_bits_source = widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9] wire [31:0] widget_anonIn_a_bits_address = widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9] wire [7:0] widget_anonIn_a_bits_mask = widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9] wire [63:0] widget_anonIn_a_bits_data = widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9] wire widget_anonIn_b_ready = widget_auto_anon_in_b_ready; // @[WidthWidget.scala:27:9] wire widget_anonIn_b_valid; // @[MixedNode.scala:551:17] wire [2:0] widget_anonIn_b_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] widget_anonIn_b_bits_param; // @[MixedNode.scala:551:17] wire [3:0] widget_anonIn_b_bits_size; // @[MixedNode.scala:551:17] wire widget_anonIn_b_bits_source; // @[MixedNode.scala:551:17] wire [31:0] widget_anonIn_b_bits_address; // @[MixedNode.scala:551:17] wire [7:0] widget_anonIn_b_bits_mask; // @[MixedNode.scala:551:17] wire [63:0] widget_anonIn_b_bits_data; // @[MixedNode.scala:551:17] wire widget_anonIn_b_bits_corrupt; // @[MixedNode.scala:551:17] wire widget_anonIn_c_ready; // @[MixedNode.scala:551:17] wire widget_anonIn_c_valid = widget_auto_anon_in_c_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonIn_c_bits_opcode = widget_auto_anon_in_c_bits_opcode; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonIn_c_bits_param = widget_auto_anon_in_c_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_anonIn_c_bits_size = widget_auto_anon_in_c_bits_size; // @[WidthWidget.scala:27:9] wire widget_anonIn_c_bits_source = widget_auto_anon_in_c_bits_source; // @[WidthWidget.scala:27:9] wire [31:0] widget_anonIn_c_bits_address = widget_auto_anon_in_c_bits_address; // @[WidthWidget.scala:27:9] wire [63:0] widget_anonIn_c_bits_data = widget_auto_anon_in_c_bits_data; // @[WidthWidget.scala:27:9] wire widget_anonIn_d_ready = widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9] wire widget_anonIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] widget_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] widget_anonIn_d_bits_param; // @[MixedNode.scala:551:17] wire [3:0] widget_anonIn_d_bits_size; // @[MixedNode.scala:551:17] wire widget_anonIn_d_bits_source; // @[MixedNode.scala:551:17] wire [2:0] widget_anonIn_d_bits_sink; // @[MixedNode.scala:551:17] wire widget_anonIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] widget_anonIn_d_bits_data; // @[MixedNode.scala:551:17] wire widget_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire widget_anonIn_e_ready; // @[MixedNode.scala:551:17] wire widget_anonIn_e_valid = widget_auto_anon_in_e_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonIn_e_bits_sink = widget_auto_anon_in_e_bits_sink; // @[WidthWidget.scala:27:9] wire widget_anonOut_a_ready = widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9] wire widget_anonOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] widget_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] widget_anonOut_a_bits_param; // @[MixedNode.scala:542:17] wire [3:0] widget_anonOut_a_bits_size; // @[MixedNode.scala:542:17] wire widget_anonOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] widget_anonOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] 
widget_anonOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] widget_anonOut_a_bits_data; // @[MixedNode.scala:542:17] wire widget_anonOut_b_ready; // @[MixedNode.scala:542:17] wire widget_anonOut_b_valid = widget_auto_anon_out_b_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonOut_b_bits_opcode = widget_auto_anon_out_b_bits_opcode; // @[WidthWidget.scala:27:9] wire [1:0] widget_anonOut_b_bits_param = widget_auto_anon_out_b_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_anonOut_b_bits_size = widget_auto_anon_out_b_bits_size; // @[WidthWidget.scala:27:9] wire widget_anonOut_b_bits_source = widget_auto_anon_out_b_bits_source; // @[WidthWidget.scala:27:9] wire [31:0] widget_anonOut_b_bits_address = widget_auto_anon_out_b_bits_address; // @[WidthWidget.scala:27:9] wire [7:0] widget_anonOut_b_bits_mask = widget_auto_anon_out_b_bits_mask; // @[WidthWidget.scala:27:9] wire [63:0] widget_anonOut_b_bits_data = widget_auto_anon_out_b_bits_data; // @[WidthWidget.scala:27:9] wire widget_anonOut_b_bits_corrupt = widget_auto_anon_out_b_bits_corrupt; // @[WidthWidget.scala:27:9] wire widget_anonOut_c_ready = widget_auto_anon_out_c_ready; // @[WidthWidget.scala:27:9] wire widget_anonOut_c_valid; // @[MixedNode.scala:542:17] wire [2:0] widget_anonOut_c_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] widget_anonOut_c_bits_param; // @[MixedNode.scala:542:17] wire [3:0] widget_anonOut_c_bits_size; // @[MixedNode.scala:542:17] wire widget_anonOut_c_bits_source; // @[MixedNode.scala:542:17] wire [31:0] widget_anonOut_c_bits_address; // @[MixedNode.scala:542:17] wire [63:0] widget_anonOut_c_bits_data; // @[MixedNode.scala:542:17] wire widget_anonOut_d_ready; // @[MixedNode.scala:542:17] wire widget_anonOut_d_valid = widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonOut_d_bits_opcode = widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9] wire [1:0] widget_anonOut_d_bits_param = widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_anonOut_d_bits_size = widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9] wire widget_anonOut_d_bits_source = widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonOut_d_bits_sink = widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9] wire widget_anonOut_d_bits_denied = widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9] wire [63:0] widget_anonOut_d_bits_data = widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9] wire widget_anonOut_d_bits_corrupt = widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire widget_anonOut_e_ready = widget_auto_anon_out_e_ready; // @[WidthWidget.scala:27:9] wire widget_anonOut_e_valid; // @[MixedNode.scala:542:17] wire [2:0] widget_anonOut_e_bits_sink; // @[MixedNode.scala:542:17] wire widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_b_bits_opcode; // @[WidthWidget.scala:27:9] wire [1:0] widget_auto_anon_in_b_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_in_b_bits_size; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_b_bits_source; // @[WidthWidget.scala:27:9] wire [31:0] widget_auto_anon_in_b_bits_address; // @[WidthWidget.scala:27:9] wire [7:0] widget_auto_anon_in_b_bits_mask; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_in_b_bits_data; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_b_bits_corrupt; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_b_valid; // 
@[WidthWidget.scala:27:9] wire widget_auto_anon_in_c_ready; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9] wire [1:0] widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_e_ready; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9] wire [31:0] widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9] wire [7:0] widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_b_ready; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_c_bits_opcode; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_c_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_out_c_bits_size; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_c_bits_source; // @[WidthWidget.scala:27:9] wire [31:0] widget_auto_anon_out_c_bits_address; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_out_c_bits_data; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_c_valid; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_e_bits_sink; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_e_valid; // @[WidthWidget.scala:27:9] assign widget_anonIn_a_ready = widget_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17] assign widget_auto_anon_out_a_valid = widget_anonOut_a_valid; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_opcode = widget_anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_param = widget_anonOut_a_bits_param; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_size = widget_anonOut_a_bits_size; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_source = widget_anonOut_a_bits_source; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_address = widget_anonOut_a_bits_address; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_mask = widget_anonOut_a_bits_mask; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_data = widget_anonOut_a_bits_data; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_b_ready = widget_anonOut_b_ready; // @[WidthWidget.scala:27:9] assign widget_anonIn_b_valid = widget_anonOut_b_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_opcode = widget_anonOut_b_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_param = widget_anonOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_size = widget_anonOut_b_bits_size; // @[MixedNode.scala:542:17, :551:17] assign 
widget_anonIn_b_bits_source = widget_anonOut_b_bits_source; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_address = widget_anonOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_mask = widget_anonOut_b_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_data = widget_anonOut_b_bits_data; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_corrupt = widget_anonOut_b_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_c_ready = widget_anonOut_c_ready; // @[MixedNode.scala:542:17, :551:17] assign widget_auto_anon_out_c_valid = widget_anonOut_c_valid; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_c_bits_opcode = widget_anonOut_c_bits_opcode; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_c_bits_param = widget_anonOut_c_bits_param; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_c_bits_size = widget_anonOut_c_bits_size; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_c_bits_source = widget_anonOut_c_bits_source; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_c_bits_address = widget_anonOut_c_bits_address; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_c_bits_data = widget_anonOut_c_bits_data; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_d_ready = widget_anonOut_d_ready; // @[WidthWidget.scala:27:9] assign widget_anonIn_d_valid = widget_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_opcode = widget_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_param = widget_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_size = widget_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_source = widget_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_sink = widget_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_denied = widget_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_data = widget_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_corrupt = widget_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_e_ready = widget_anonOut_e_ready; // @[MixedNode.scala:542:17, :551:17] assign widget_auto_anon_out_e_valid = widget_anonOut_e_valid; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_e_bits_sink = widget_anonOut_e_bits_sink; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_a_ready = widget_anonIn_a_ready; // @[WidthWidget.scala:27:9] assign widget_anonOut_a_valid = widget_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_opcode = widget_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_param = widget_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_size = widget_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_source = widget_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_address = widget_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_mask = widget_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_data = widget_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_b_ready = 
widget_anonIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign widget_auto_anon_in_b_valid = widget_anonIn_b_valid; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_opcode = widget_anonIn_b_bits_opcode; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_param = widget_anonIn_b_bits_param; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_size = widget_anonIn_b_bits_size; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_source = widget_anonIn_b_bits_source; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_address = widget_anonIn_b_bits_address; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_mask = widget_anonIn_b_bits_mask; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_data = widget_anonIn_b_bits_data; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_corrupt = widget_anonIn_b_bits_corrupt; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_c_ready = widget_anonIn_c_ready; // @[WidthWidget.scala:27:9] assign widget_anonOut_c_valid = widget_anonIn_c_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_c_bits_opcode = widget_anonIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_c_bits_param = widget_anonIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_c_bits_size = widget_anonIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_c_bits_source = widget_anonIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_c_bits_address = widget_anonIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_c_bits_data = widget_anonIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_d_ready = widget_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign widget_auto_anon_in_d_valid = widget_anonIn_d_valid; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_opcode = widget_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_param = widget_anonIn_d_bits_param; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_size = widget_anonIn_d_bits_size; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_source = widget_anonIn_d_bits_source; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_sink = widget_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_denied = widget_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_data = widget_anonIn_d_bits_data; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_corrupt = widget_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_e_ready = widget_anonIn_e_ready; // @[WidthWidget.scala:27:9] assign widget_anonOut_e_valid = widget_anonIn_e_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_e_bits_sink = widget_anonIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire widget_1_anonIn_a_ready; // @[MixedNode.scala:551:17] wire widget_1_anonIn_a_valid = widget_1_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9] wire [31:0] widget_1_anonIn_a_bits_address = widget_1_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9] wire widget_1_anonIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] widget_1_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] widget_1_anonIn_d_bits_param; // @[MixedNode.scala:551:17] wire [3:0] widget_1_anonIn_d_bits_size; // @[MixedNode.scala:551:17] wire [2:0] 
widget_1_anonIn_d_bits_sink; // @[MixedNode.scala:551:17] wire widget_1_anonIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] widget_1_anonIn_d_bits_data; // @[MixedNode.scala:551:17] wire widget_1_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire widget_1_anonOut_a_ready = widget_1_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9] wire widget_1_anonOut_a_valid; // @[MixedNode.scala:542:17] wire [31:0] widget_1_anonOut_a_bits_address; // @[MixedNode.scala:542:17] wire widget_1_anonOut_d_valid = widget_1_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_anonOut_d_bits_opcode = widget_1_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9] wire [1:0] widget_1_anonOut_d_bits_param = widget_1_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_anonOut_d_bits_size = widget_1_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_anonOut_d_bits_sink = widget_1_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9] wire widget_1_anonOut_d_bits_denied = widget_1_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_anonOut_d_bits_data = widget_1_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9] wire widget_1_anonOut_d_bits_corrupt = widget_1_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9] wire [1:0] widget_1_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9] wire [31:0] widget_1_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9] assign widget_1_anonIn_a_ready = widget_1_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17] assign widget_1_auto_anon_out_a_valid = widget_1_anonOut_a_valid; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_out_a_bits_address = widget_1_anonOut_a_bits_address; // @[WidthWidget.scala:27:9] assign widget_1_anonIn_d_valid = widget_1_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_opcode = widget_1_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_param = widget_1_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_size = widget_1_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_sink = widget_1_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_denied = widget_1_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_data = widget_1_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_corrupt = widget_1_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign widget_1_auto_anon_in_a_ready = widget_1_anonIn_a_ready; // @[WidthWidget.scala:27:9] assign widget_1_anonOut_a_valid = widget_1_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonOut_a_bits_address = widget_1_anonIn_a_bits_address; // 
@[MixedNode.scala:542:17, :551:17] assign widget_1_auto_anon_in_d_valid = widget_1_anonIn_d_valid; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_opcode = widget_1_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_param = widget_1_anonIn_d_bits_param; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_size = widget_1_anonIn_d_bits_size; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_sink = widget_1_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_denied = widget_1_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_data = widget_1_anonIn_d_bits_data; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_corrupt = widget_1_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire buffer_nodeIn_a_ready; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_a_ready = buffer_auto_in_a_ready; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_a_valid; // @[MixedNode.scala:542:17] wire buffer_nodeIn_a_valid = buffer_auto_in_a_valid; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_a_bits_opcode = buffer_auto_in_a_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_a_bits_param = buffer_auto_in_a_bits_param; // @[Buffer.scala:40:9] wire [3:0] tlOtherMastersNodeOut_a_bits_size; // @[MixedNode.scala:542:17] wire [3:0] buffer_nodeIn_a_bits_size = buffer_auto_in_a_bits_size; // @[Buffer.scala:40:9] wire [1:0] tlOtherMastersNodeOut_a_bits_source; // @[MixedNode.scala:542:17] wire [1:0] buffer_nodeIn_a_bits_source = buffer_auto_in_a_bits_source; // @[Buffer.scala:40:9] wire [31:0] tlOtherMastersNodeOut_a_bits_address; // @[MixedNode.scala:542:17] wire [31:0] buffer_nodeIn_a_bits_address = buffer_auto_in_a_bits_address; // @[Buffer.scala:40:9] wire [7:0] tlOtherMastersNodeOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [7:0] buffer_nodeIn_a_bits_mask = buffer_auto_in_a_bits_mask; // @[Buffer.scala:40:9] wire [63:0] tlOtherMastersNodeOut_a_bits_data; // @[MixedNode.scala:542:17] wire [63:0] buffer_nodeIn_a_bits_data = buffer_auto_in_a_bits_data; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_b_ready; // @[MixedNode.scala:542:17] wire buffer_nodeIn_b_ready = buffer_auto_in_b_ready; // @[Buffer.scala:40:9] wire buffer_nodeIn_b_valid; // @[MixedNode.scala:551:17] wire [2:0] buffer_nodeIn_b_bits_opcode; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_b_valid = buffer_auto_in_b_valid; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeIn_b_bits_param; // @[MixedNode.scala:551:17] wire [2:0] tlOtherMastersNodeOut_b_bits_opcode = buffer_auto_in_b_bits_opcode; // @[Buffer.scala:40:9] wire [3:0] buffer_nodeIn_b_bits_size; // @[MixedNode.scala:551:17] wire [1:0] tlOtherMastersNodeOut_b_bits_param = buffer_auto_in_b_bits_param; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeIn_b_bits_source; // @[MixedNode.scala:551:17] wire [3:0] tlOtherMastersNodeOut_b_bits_size = buffer_auto_in_b_bits_size; // @[Buffer.scala:40:9] wire [31:0] buffer_nodeIn_b_bits_address; // @[MixedNode.scala:551:17] wire [1:0] tlOtherMastersNodeOut_b_bits_source = buffer_auto_in_b_bits_source; // @[Buffer.scala:40:9] wire [7:0] buffer_nodeIn_b_bits_mask; // @[MixedNode.scala:551:17] wire [31:0] tlOtherMastersNodeOut_b_bits_address = buffer_auto_in_b_bits_address; // @[Buffer.scala:40:9] wire [63:0] 
buffer_nodeIn_b_bits_data; // @[MixedNode.scala:551:17] wire [7:0] tlOtherMastersNodeOut_b_bits_mask = buffer_auto_in_b_bits_mask; // @[Buffer.scala:40:9] wire buffer_nodeIn_b_bits_corrupt; // @[MixedNode.scala:551:17] wire [63:0] tlOtherMastersNodeOut_b_bits_data = buffer_auto_in_b_bits_data; // @[Buffer.scala:40:9] wire buffer_nodeIn_c_ready; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_b_bits_corrupt = buffer_auto_in_b_bits_corrupt; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_c_ready = buffer_auto_in_c_ready; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_c_valid; // @[MixedNode.scala:542:17] wire buffer_nodeIn_c_valid = buffer_auto_in_c_valid; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeOut_c_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_c_bits_opcode = buffer_auto_in_c_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeOut_c_bits_param; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_c_bits_param = buffer_auto_in_c_bits_param; // @[Buffer.scala:40:9] wire [3:0] tlOtherMastersNodeOut_c_bits_size; // @[MixedNode.scala:542:17] wire [3:0] buffer_nodeIn_c_bits_size = buffer_auto_in_c_bits_size; // @[Buffer.scala:40:9] wire [1:0] tlOtherMastersNodeOut_c_bits_source; // @[MixedNode.scala:542:17] wire [1:0] buffer_nodeIn_c_bits_source = buffer_auto_in_c_bits_source; // @[Buffer.scala:40:9] wire [31:0] tlOtherMastersNodeOut_c_bits_address; // @[MixedNode.scala:542:17] wire [31:0] buffer_nodeIn_c_bits_address = buffer_auto_in_c_bits_address; // @[Buffer.scala:40:9] wire [63:0] tlOtherMastersNodeOut_c_bits_data; // @[MixedNode.scala:542:17] wire [63:0] buffer_nodeIn_c_bits_data = buffer_auto_in_c_bits_data; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_d_ready; // @[MixedNode.scala:542:17] wire buffer_nodeIn_d_ready = buffer_auto_in_d_ready; // @[Buffer.scala:40:9] wire buffer_nodeIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] buffer_nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_d_valid = buffer_auto_in_d_valid; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeIn_d_bits_param; // @[MixedNode.scala:551:17] wire [2:0] tlOtherMastersNodeOut_d_bits_opcode = buffer_auto_in_d_bits_opcode; // @[Buffer.scala:40:9] wire [3:0] buffer_nodeIn_d_bits_size; // @[MixedNode.scala:551:17] wire [1:0] tlOtherMastersNodeOut_d_bits_param = buffer_auto_in_d_bits_param; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeIn_d_bits_source; // @[MixedNode.scala:551:17] wire [3:0] tlOtherMastersNodeOut_d_bits_size = buffer_auto_in_d_bits_size; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeIn_d_bits_sink; // @[MixedNode.scala:551:17] wire [1:0] tlOtherMastersNodeOut_d_bits_source = buffer_auto_in_d_bits_source; // @[Buffer.scala:40:9] wire buffer_nodeIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [2:0] tlOtherMastersNodeOut_d_bits_sink = buffer_auto_in_d_bits_sink; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeIn_d_bits_data; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_d_bits_denied = buffer_auto_in_d_bits_denied; // @[Buffer.scala:40:9] wire buffer_nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire [63:0] tlOtherMastersNodeOut_d_bits_data = buffer_auto_in_d_bits_data; // @[Buffer.scala:40:9] wire buffer_nodeIn_e_ready; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_d_bits_corrupt = buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_e_ready = buffer_auto_in_e_ready; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_e_valid; // @[MixedNode.scala:542:17] 
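// E-channel wiring between the master-crossbar node and the TLBuffer continues below, followed by the buffer's output-side (buffer_nodeOut_*) wires, which track buffer_auto_out_* and ultimately drive the tile's auto_buffer_out_* port.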
wire buffer_nodeIn_e_valid = buffer_auto_in_e_valid; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeOut_e_bits_sink; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_e_bits_sink = buffer_auto_in_e_bits_sink; // @[Buffer.scala:40:9] wire buffer_nodeOut_a_ready = buffer_auto_out_a_ready; // @[Buffer.scala:40:9] wire buffer_nodeOut_a_valid; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_valid_0 = buffer_auto_out_a_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_opcode_0 = buffer_auto_out_a_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_a_bits_param; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_param_0 = buffer_auto_out_a_bits_param; // @[Buffer.scala:40:9] wire [3:0] buffer_nodeOut_a_bits_size; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_size_0 = buffer_auto_out_a_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_a_bits_source; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_source_0 = buffer_auto_out_a_bits_source; // @[Buffer.scala:40:9] wire [31:0] buffer_nodeOut_a_bits_address; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_address_0 = buffer_auto_out_a_bits_address; // @[Buffer.scala:40:9] wire [7:0] buffer_nodeOut_a_bits_mask; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_mask_0 = buffer_auto_out_a_bits_mask; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeOut_a_bits_data; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_data_0 = buffer_auto_out_a_bits_data; // @[Buffer.scala:40:9] wire buffer_nodeOut_b_ready; // @[MixedNode.scala:542:17] assign auto_buffer_out_b_ready_0 = buffer_auto_out_b_ready; // @[Buffer.scala:40:9] wire buffer_nodeOut_b_valid = buffer_auto_out_b_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_b_bits_opcode = buffer_auto_out_b_bits_opcode; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_b_bits_param = buffer_auto_out_b_bits_param; // @[Buffer.scala:40:9] wire [3:0] buffer_nodeOut_b_bits_size = buffer_auto_out_b_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_b_bits_source = buffer_auto_out_b_bits_source; // @[Buffer.scala:40:9] wire [31:0] buffer_nodeOut_b_bits_address = buffer_auto_out_b_bits_address; // @[Buffer.scala:40:9] wire [7:0] buffer_nodeOut_b_bits_mask = buffer_auto_out_b_bits_mask; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeOut_b_bits_data = buffer_auto_out_b_bits_data; // @[Buffer.scala:40:9] wire buffer_nodeOut_b_bits_corrupt = buffer_auto_out_b_bits_corrupt; // @[Buffer.scala:40:9] wire buffer_nodeOut_c_ready = buffer_auto_out_c_ready; // @[Buffer.scala:40:9] wire buffer_nodeOut_c_valid; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_valid_0 = buffer_auto_out_c_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_c_bits_opcode; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_bits_opcode_0 = buffer_auto_out_c_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_c_bits_param; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_bits_param_0 = buffer_auto_out_c_bits_param; // @[Buffer.scala:40:9] wire [3:0] buffer_nodeOut_c_bits_size; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_bits_size_0 = buffer_auto_out_c_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_c_bits_source; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_bits_source_0 = buffer_auto_out_c_bits_source; // @[Buffer.scala:40:9] wire [31:0] buffer_nodeOut_c_bits_address; // @[MixedNode.scala:542:17] assign 
auto_buffer_out_c_bits_address_0 = buffer_auto_out_c_bits_address; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeOut_c_bits_data; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_bits_data_0 = buffer_auto_out_c_bits_data; // @[Buffer.scala:40:9] wire buffer_nodeOut_d_ready; // @[MixedNode.scala:542:17] assign auto_buffer_out_d_ready_0 = buffer_auto_out_d_ready; // @[Buffer.scala:40:9] wire buffer_nodeOut_d_valid = buffer_auto_out_d_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_d_bits_opcode = buffer_auto_out_d_bits_opcode; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_d_bits_param = buffer_auto_out_d_bits_param; // @[Buffer.scala:40:9] wire [3:0] buffer_nodeOut_d_bits_size = buffer_auto_out_d_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_d_bits_source = buffer_auto_out_d_bits_source; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_d_bits_sink = buffer_auto_out_d_bits_sink; // @[Buffer.scala:40:9] wire buffer_nodeOut_d_bits_denied = buffer_auto_out_d_bits_denied; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeOut_d_bits_data = buffer_auto_out_d_bits_data; // @[Buffer.scala:40:9] wire buffer_nodeOut_d_bits_corrupt = buffer_auto_out_d_bits_corrupt; // @[Buffer.scala:40:9] wire buffer_nodeOut_e_ready = buffer_auto_out_e_ready; // @[Buffer.scala:40:9] wire buffer_nodeOut_e_valid; // @[MixedNode.scala:542:17] assign auto_buffer_out_e_valid_0 = buffer_auto_out_e_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_e_bits_sink; // @[MixedNode.scala:542:17] assign auto_buffer_out_e_bits_sink_0 = buffer_auto_out_e_bits_sink; // @[Buffer.scala:40:9] assign buffer_nodeIn_a_ready = buffer_nodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_out_a_valid = buffer_nodeOut_a_valid; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_opcode = buffer_nodeOut_a_bits_opcode; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_param = buffer_nodeOut_a_bits_param; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_size = buffer_nodeOut_a_bits_size; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_source = buffer_nodeOut_a_bits_source; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_address = buffer_nodeOut_a_bits_address; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_mask = buffer_nodeOut_a_bits_mask; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_data = buffer_nodeOut_a_bits_data; // @[Buffer.scala:40:9] assign buffer_auto_out_b_ready = buffer_nodeOut_b_ready; // @[Buffer.scala:40:9] assign buffer_nodeIn_b_valid = buffer_nodeOut_b_valid; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_opcode = buffer_nodeOut_b_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_param = buffer_nodeOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_size = buffer_nodeOut_b_bits_size; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_source = buffer_nodeOut_b_bits_source; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_address = buffer_nodeOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_mask = buffer_nodeOut_b_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_data = buffer_nodeOut_b_bits_data; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_corrupt = buffer_nodeOut_b_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_c_ready = buffer_nodeOut_c_ready; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_out_c_valid = 
buffer_nodeOut_c_valid; // @[Buffer.scala:40:9] assign buffer_auto_out_c_bits_opcode = buffer_nodeOut_c_bits_opcode; // @[Buffer.scala:40:9] assign buffer_auto_out_c_bits_param = buffer_nodeOut_c_bits_param; // @[Buffer.scala:40:9] assign buffer_auto_out_c_bits_size = buffer_nodeOut_c_bits_size; // @[Buffer.scala:40:9] assign buffer_auto_out_c_bits_source = buffer_nodeOut_c_bits_source; // @[Buffer.scala:40:9] assign buffer_auto_out_c_bits_address = buffer_nodeOut_c_bits_address; // @[Buffer.scala:40:9] assign buffer_auto_out_c_bits_data = buffer_nodeOut_c_bits_data; // @[Buffer.scala:40:9] assign buffer_auto_out_d_ready = buffer_nodeOut_d_ready; // @[Buffer.scala:40:9] assign buffer_nodeIn_d_valid = buffer_nodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_opcode = buffer_nodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_param = buffer_nodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_size = buffer_nodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_source = buffer_nodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_sink = buffer_nodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_denied = buffer_nodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_data = buffer_nodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_corrupt = buffer_nodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_e_ready = buffer_nodeOut_e_ready; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_out_e_valid = buffer_nodeOut_e_valid; // @[Buffer.scala:40:9] assign buffer_auto_out_e_bits_sink = buffer_nodeOut_e_bits_sink; // @[Buffer.scala:40:9] assign buffer_auto_in_a_ready = buffer_nodeIn_a_ready; // @[Buffer.scala:40:9] assign buffer_nodeOut_a_valid = buffer_nodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_opcode = buffer_nodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_param = buffer_nodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_size = buffer_nodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_source = buffer_nodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_address = buffer_nodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_mask = buffer_nodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_data = buffer_nodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_b_ready = buffer_nodeIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_in_b_valid = buffer_nodeIn_b_valid; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_opcode = buffer_nodeIn_b_bits_opcode; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_param = buffer_nodeIn_b_bits_param; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_size = buffer_nodeIn_b_bits_size; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_source = buffer_nodeIn_b_bits_source; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_address = buffer_nodeIn_b_bits_address; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_mask = buffer_nodeIn_b_bits_mask; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_data = buffer_nodeIn_b_bits_data; // @[Buffer.scala:40:9] 
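// The remaining B-channel fields and the C, D, and E channels are forwarded through the buffer node in the same way: request channels (A, C, E) pass from buffer_nodeIn_* to buffer_nodeOut_*, while response channels (B, D) pass back from buffer_nodeOut_* to buffer_auto_in_*.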
assign buffer_auto_in_b_bits_corrupt = buffer_nodeIn_b_bits_corrupt; // @[Buffer.scala:40:9] assign buffer_auto_in_c_ready = buffer_nodeIn_c_ready; // @[Buffer.scala:40:9] assign buffer_nodeOut_c_valid = buffer_nodeIn_c_valid; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_c_bits_opcode = buffer_nodeIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_c_bits_param = buffer_nodeIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_c_bits_size = buffer_nodeIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_c_bits_source = buffer_nodeIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_c_bits_address = buffer_nodeIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_c_bits_data = buffer_nodeIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_d_ready = buffer_nodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_in_d_valid = buffer_nodeIn_d_valid; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_opcode = buffer_nodeIn_d_bits_opcode; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_param = buffer_nodeIn_d_bits_param; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_size = buffer_nodeIn_d_bits_size; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_source = buffer_nodeIn_d_bits_source; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_sink = buffer_nodeIn_d_bits_sink; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_denied = buffer_nodeIn_d_bits_denied; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_data = buffer_nodeIn_d_bits_data; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_corrupt = buffer_nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9] assign buffer_auto_in_e_ready = buffer_nodeIn_e_ready; // @[Buffer.scala:40:9] assign buffer_nodeOut_e_valid = buffer_nodeIn_e_valid; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_e_bits_sink = buffer_nodeIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_a_ready = tlOtherMastersNodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_a_valid; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_valid = tlOtherMastersNodeOut_a_valid; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeIn_a_bits_opcode; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_opcode = tlOtherMastersNodeOut_a_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeIn_a_bits_param; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_param = tlOtherMastersNodeOut_a_bits_param; // @[Buffer.scala:40:9] wire [3:0] tlOtherMastersNodeIn_a_bits_size; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_size = tlOtherMastersNodeOut_a_bits_size; // @[Buffer.scala:40:9] wire [1:0] tlOtherMastersNodeIn_a_bits_source; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_source = tlOtherMastersNodeOut_a_bits_source; // @[Buffer.scala:40:9] wire [31:0] tlOtherMastersNodeIn_a_bits_address; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_address = tlOtherMastersNodeOut_a_bits_address; // @[Buffer.scala:40:9] wire [7:0] tlOtherMastersNodeIn_a_bits_mask; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_mask = tlOtherMastersNodeOut_a_bits_mask; // @[Buffer.scala:40:9] wire [63:0] tlOtherMastersNodeIn_a_bits_data; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_data = tlOtherMastersNodeOut_a_bits_data; // @[Buffer.scala:40:9] wire tlOtherMastersNodeIn_b_ready; // 
@[MixedNode.scala:551:17] assign buffer_auto_in_b_ready = tlOtherMastersNodeOut_b_ready; // @[Buffer.scala:40:9] wire tlOtherMastersNodeIn_b_valid = tlOtherMastersNodeOut_b_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlOtherMastersNodeIn_b_bits_opcode = tlOtherMastersNodeOut_b_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlOtherMastersNodeIn_b_bits_param = tlOtherMastersNodeOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlOtherMastersNodeIn_b_bits_size = tlOtherMastersNodeOut_b_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlOtherMastersNodeIn_b_bits_source = tlOtherMastersNodeOut_b_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [31:0] tlOtherMastersNodeIn_b_bits_address = tlOtherMastersNodeOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17] wire [7:0] tlOtherMastersNodeIn_b_bits_mask = tlOtherMastersNodeOut_b_bits_mask; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlOtherMastersNodeIn_b_bits_data = tlOtherMastersNodeOut_b_bits_data; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_b_bits_corrupt = tlOtherMastersNodeOut_b_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_c_ready = tlOtherMastersNodeOut_c_ready; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_c_valid; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_valid = tlOtherMastersNodeOut_c_valid; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeIn_c_bits_opcode; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_bits_opcode = tlOtherMastersNodeOut_c_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeIn_c_bits_param; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_bits_param = tlOtherMastersNodeOut_c_bits_param; // @[Buffer.scala:40:9] wire [3:0] tlOtherMastersNodeIn_c_bits_size; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_bits_size = tlOtherMastersNodeOut_c_bits_size; // @[Buffer.scala:40:9] wire [1:0] tlOtherMastersNodeIn_c_bits_source; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_bits_source = tlOtherMastersNodeOut_c_bits_source; // @[Buffer.scala:40:9] wire [31:0] tlOtherMastersNodeIn_c_bits_address; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_bits_address = tlOtherMastersNodeOut_c_bits_address; // @[Buffer.scala:40:9] wire [63:0] tlOtherMastersNodeIn_c_bits_data; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_bits_data = tlOtherMastersNodeOut_c_bits_data; // @[Buffer.scala:40:9] wire tlOtherMastersNodeIn_d_ready; // @[MixedNode.scala:551:17] assign buffer_auto_in_d_ready = tlOtherMastersNodeOut_d_ready; // @[Buffer.scala:40:9] wire tlOtherMastersNodeIn_d_valid = tlOtherMastersNodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlOtherMastersNodeIn_d_bits_opcode = tlOtherMastersNodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlOtherMastersNodeIn_d_bits_param = tlOtherMastersNodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlOtherMastersNodeIn_d_bits_size = tlOtherMastersNodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlOtherMastersNodeIn_d_bits_source = tlOtherMastersNodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlOtherMastersNodeIn_d_bits_sink = tlOtherMastersNodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_d_bits_denied = tlOtherMastersNodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlOtherMastersNodeIn_d_bits_data = tlOtherMastersNodeOut_d_bits_data; // 
@[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_d_bits_corrupt = tlOtherMastersNodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_e_ready = tlOtherMastersNodeOut_e_ready; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_e_valid; // @[MixedNode.scala:551:17] assign buffer_auto_in_e_valid = tlOtherMastersNodeOut_e_valid; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeIn_e_bits_sink; // @[MixedNode.scala:551:17] assign buffer_auto_in_e_bits_sink = tlOtherMastersNodeOut_e_bits_sink; // @[Buffer.scala:40:9] assign tlOtherMastersNodeOut_a_valid = tlOtherMastersNodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_opcode = tlOtherMastersNodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_param = tlOtherMastersNodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_size = tlOtherMastersNodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_source = tlOtherMastersNodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_address = tlOtherMastersNodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_mask = tlOtherMastersNodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_data = tlOtherMastersNodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_b_ready = tlOtherMastersNodeIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_valid = tlOtherMastersNodeIn_c_valid; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_bits_opcode = tlOtherMastersNodeIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_bits_param = tlOtherMastersNodeIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_bits_size = tlOtherMastersNodeIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_bits_source = tlOtherMastersNodeIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_bits_address = tlOtherMastersNodeIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_bits_data = tlOtherMastersNodeIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_d_ready = tlOtherMastersNodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_e_valid = tlOtherMastersNodeIn_e_valid; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_e_bits_sink = tlOtherMastersNodeIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign broadcast_auto_in = hartidOut; // @[MixedNode.scala:542:17] assign hartidOut = hartidIn; // @[MixedNode.scala:542:17, :551:17] assign auto_trace_source_out_insns_0_valid_0 = traceSourceNodeOut_insns_0_valid; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_iaddr_0 = traceSourceNodeOut_insns_0_iaddr; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_insn_0 = traceSourceNodeOut_insns_0_insn; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_priv_0 = traceSourceNodeOut_insns_0_priv; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_exception_0 = traceSourceNodeOut_insns_0_exception; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_interrupt_0 = traceSourceNodeOut_insns_0_interrupt; // 
@[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_cause_0 = traceSourceNodeOut_insns_0_cause; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_tval_0 = traceSourceNodeOut_insns_0_tval; // @[RocketTile.scala:141:7] assign auto_trace_source_out_time_0 = traceSourceNodeOut_time; // @[RocketTile.scala:141:7] assign broadcast_2_auto_in_0_valid_0 = bpwatchSourceNodeOut_0_valid_0; // @[MixedNode.scala:542:17] assign broadcast_2_auto_in_0_action = bpwatchSourceNodeOut_0_action; // @[MixedNode.scala:542:17] wire int_localOut_0; // @[MixedNode.scala:542:17] wire x1_int_localOut_0; // @[MixedNode.scala:542:17] wire x1_int_localOut_1; // @[MixedNode.scala:542:17] wire x1_int_localOut_1_0; // @[MixedNode.scala:542:17] wire x1_int_localOut_2_0; // @[MixedNode.scala:542:17] assign int_localOut_0 = int_localIn_0; // @[MixedNode.scala:542:17, :551:17] assign x1_int_localOut_0 = x1_int_localIn_0; // @[MixedNode.scala:542:17, :551:17] assign x1_int_localOut_1 = x1_int_localIn_1; // @[MixedNode.scala:542:17, :551:17] assign x1_int_localOut_1_0 = x1_int_localIn_1_0; // @[MixedNode.scala:542:17, :551:17] assign x1_int_localOut_2_0 = x1_int_localIn_2_0; // @[MixedNode.scala:542:17, :551:17] wire intSinkNodeIn_0; // @[MixedNode.scala:551:17] wire intSinkNodeIn_1; // @[MixedNode.scala:551:17] wire intSinkNodeIn_2; // @[MixedNode.scala:551:17] wire intSinkNodeIn_3; // @[MixedNode.scala:551:17] wire intSinkNodeIn_4; // @[MixedNode.scala:551:17] assign auto_wfi_out_0_0 = wfiNodeOut_0; // @[RocketTile.scala:141:7] reg wfiNodeOut_0_REG; // @[Interrupts.scala:131:36] assign wfiNodeOut_0 = wfiNodeOut_0_REG; // @[Interrupts.scala:131:36] wire _core_io_rocc_busy_T = _cmdRouter_io_busy | _rerocc_client_io_busy; // @[RocketTile.scala:213:49] always @(posedge clock) begin // @[RocketTile.scala:141:7] if (reset) // @[RocketTile.scala:141:7] wfiNodeOut_0_REG <= 1'h0; // @[Interrupts.scala:131:36] else // @[RocketTile.scala:141:7] wfiNodeOut_0_REG <= _core_io_wfi; // @[RocketTile.scala:147:20] end // always @(posedge) TLXbar_MasterXbar_RocketTile_i2_o1_a32d64s2k3z4c tlMasterXbar ( // @[HierarchicalElement.scala:55:42] .clock (clock), .reset (reset), .auto_anon_in_1_a_ready (widget_1_auto_anon_out_a_ready), .auto_anon_in_1_a_valid (widget_1_auto_anon_out_a_valid), // @[WidthWidget.scala:27:9] .auto_anon_in_1_a_bits_address (widget_1_auto_anon_out_a_bits_address), // @[WidthWidget.scala:27:9] .auto_anon_in_1_d_valid (widget_1_auto_anon_out_d_valid), .auto_anon_in_1_d_bits_opcode (widget_1_auto_anon_out_d_bits_opcode), .auto_anon_in_1_d_bits_param (widget_1_auto_anon_out_d_bits_param), .auto_anon_in_1_d_bits_size (widget_1_auto_anon_out_d_bits_size), .auto_anon_in_1_d_bits_sink (widget_1_auto_anon_out_d_bits_sink), .auto_anon_in_1_d_bits_denied (widget_1_auto_anon_out_d_bits_denied), .auto_anon_in_1_d_bits_data (widget_1_auto_anon_out_d_bits_data), .auto_anon_in_1_d_bits_corrupt (widget_1_auto_anon_out_d_bits_corrupt), .auto_anon_in_0_a_ready (widget_auto_anon_out_a_ready), .auto_anon_in_0_a_valid (widget_auto_anon_out_a_valid), // @[WidthWidget.scala:27:9] .auto_anon_in_0_a_bits_opcode (widget_auto_anon_out_a_bits_opcode), // @[WidthWidget.scala:27:9] .auto_anon_in_0_a_bits_param (widget_auto_anon_out_a_bits_param), // @[WidthWidget.scala:27:9] .auto_anon_in_0_a_bits_size (widget_auto_anon_out_a_bits_size), // @[WidthWidget.scala:27:9] .auto_anon_in_0_a_bits_source (widget_auto_anon_out_a_bits_source), // @[WidthWidget.scala:27:9] .auto_anon_in_0_a_bits_address (widget_auto_anon_out_a_bits_address), //
@[WidthWidget.scala:27:9] .auto_anon_in_0_a_bits_mask (widget_auto_anon_out_a_bits_mask), // @[WidthWidget.scala:27:9] .auto_anon_in_0_a_bits_data (widget_auto_anon_out_a_bits_data), // @[WidthWidget.scala:27:9] .auto_anon_in_0_b_ready (widget_auto_anon_out_b_ready), // @[WidthWidget.scala:27:9] .auto_anon_in_0_b_valid (widget_auto_anon_out_b_valid), .auto_anon_in_0_b_bits_opcode (widget_auto_anon_out_b_bits_opcode), .auto_anon_in_0_b_bits_param (widget_auto_anon_out_b_bits_param), .auto_anon_in_0_b_bits_size (widget_auto_anon_out_b_bits_size), .auto_anon_in_0_b_bits_source (widget_auto_anon_out_b_bits_source), .auto_anon_in_0_b_bits_address (widget_auto_anon_out_b_bits_address), .auto_anon_in_0_b_bits_mask (widget_auto_anon_out_b_bits_mask), .auto_anon_in_0_b_bits_data (widget_auto_anon_out_b_bits_data), .auto_anon_in_0_b_bits_corrupt (widget_auto_anon_out_b_bits_corrupt), .auto_anon_in_0_c_ready (widget_auto_anon_out_c_ready), .auto_anon_in_0_c_valid (widget_auto_anon_out_c_valid), // @[WidthWidget.scala:27:9] .auto_anon_in_0_c_bits_opcode (widget_auto_anon_out_c_bits_opcode), // @[WidthWidget.scala:27:9] .auto_anon_in_0_c_bits_param (widget_auto_anon_out_c_bits_param), // @[WidthWidget.scala:27:9] .auto_anon_in_0_c_bits_size (widget_auto_anon_out_c_bits_size), // @[WidthWidget.scala:27:9] .auto_anon_in_0_c_bits_source (widget_auto_anon_out_c_bits_source), // @[WidthWidget.scala:27:9] .auto_anon_in_0_c_bits_address (widget_auto_anon_out_c_bits_address), // @[WidthWidget.scala:27:9] .auto_anon_in_0_c_bits_data (widget_auto_anon_out_c_bits_data), // @[WidthWidget.scala:27:9] .auto_anon_in_0_d_ready (widget_auto_anon_out_d_ready), // @[WidthWidget.scala:27:9] .auto_anon_in_0_d_valid (widget_auto_anon_out_d_valid), .auto_anon_in_0_d_bits_opcode (widget_auto_anon_out_d_bits_opcode), .auto_anon_in_0_d_bits_param (widget_auto_anon_out_d_bits_param), .auto_anon_in_0_d_bits_size (widget_auto_anon_out_d_bits_size), .auto_anon_in_0_d_bits_source (widget_auto_anon_out_d_bits_source), .auto_anon_in_0_d_bits_sink (widget_auto_anon_out_d_bits_sink), .auto_anon_in_0_d_bits_denied (widget_auto_anon_out_d_bits_denied), .auto_anon_in_0_d_bits_data (widget_auto_anon_out_d_bits_data), .auto_anon_in_0_d_bits_corrupt (widget_auto_anon_out_d_bits_corrupt), .auto_anon_in_0_e_ready (widget_auto_anon_out_e_ready), .auto_anon_in_0_e_valid (widget_auto_anon_out_e_valid), // @[WidthWidget.scala:27:9] .auto_anon_in_0_e_bits_sink (widget_auto_anon_out_e_bits_sink), // @[WidthWidget.scala:27:9] .auto_anon_out_a_ready (tlOtherMastersNodeIn_a_ready), // @[MixedNode.scala:551:17] .auto_anon_out_a_valid (tlOtherMastersNodeIn_a_valid), .auto_anon_out_a_bits_opcode (tlOtherMastersNodeIn_a_bits_opcode), .auto_anon_out_a_bits_param (tlOtherMastersNodeIn_a_bits_param), .auto_anon_out_a_bits_size (tlOtherMastersNodeIn_a_bits_size), .auto_anon_out_a_bits_source (tlOtherMastersNodeIn_a_bits_source), .auto_anon_out_a_bits_address (tlOtherMastersNodeIn_a_bits_address), .auto_anon_out_a_bits_mask (tlOtherMastersNodeIn_a_bits_mask), .auto_anon_out_a_bits_data (tlOtherMastersNodeIn_a_bits_data), .auto_anon_out_b_ready (tlOtherMastersNodeIn_b_ready), .auto_anon_out_b_valid (tlOtherMastersNodeIn_b_valid), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_opcode (tlOtherMastersNodeIn_b_bits_opcode), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_param (tlOtherMastersNodeIn_b_bits_param), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_size (tlOtherMastersNodeIn_b_bits_size), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_source 
(tlOtherMastersNodeIn_b_bits_source), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_address (tlOtherMastersNodeIn_b_bits_address), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_mask (tlOtherMastersNodeIn_b_bits_mask), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_data (tlOtherMastersNodeIn_b_bits_data), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_corrupt (tlOtherMastersNodeIn_b_bits_corrupt), // @[MixedNode.scala:551:17] .auto_anon_out_c_ready (tlOtherMastersNodeIn_c_ready), // @[MixedNode.scala:551:17] .auto_anon_out_c_valid (tlOtherMastersNodeIn_c_valid), .auto_anon_out_c_bits_opcode (tlOtherMastersNodeIn_c_bits_opcode), .auto_anon_out_c_bits_param (tlOtherMastersNodeIn_c_bits_param), .auto_anon_out_c_bits_size (tlOtherMastersNodeIn_c_bits_size), .auto_anon_out_c_bits_source (tlOtherMastersNodeIn_c_bits_source), .auto_anon_out_c_bits_address (tlOtherMastersNodeIn_c_bits_address), .auto_anon_out_c_bits_data (tlOtherMastersNodeIn_c_bits_data), .auto_anon_out_d_ready (tlOtherMastersNodeIn_d_ready), .auto_anon_out_d_valid (tlOtherMastersNodeIn_d_valid), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_opcode (tlOtherMastersNodeIn_d_bits_opcode), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_param (tlOtherMastersNodeIn_d_bits_param), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_size (tlOtherMastersNodeIn_d_bits_size), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_source (tlOtherMastersNodeIn_d_bits_source), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_sink (tlOtherMastersNodeIn_d_bits_sink), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_denied (tlOtherMastersNodeIn_d_bits_denied), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_data (tlOtherMastersNodeIn_d_bits_data), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_corrupt (tlOtherMastersNodeIn_d_bits_corrupt), // @[MixedNode.scala:551:17] .auto_anon_out_e_ready (tlOtherMastersNodeIn_e_ready), // @[MixedNode.scala:551:17] .auto_anon_out_e_valid (tlOtherMastersNodeIn_e_valid), .auto_anon_out_e_bits_sink (tlOtherMastersNodeIn_e_bits_sink) ); // @[HierarchicalElement.scala:55:42] TLXbar_SlaveXbar_RocketTile_i0_o0_a1d8s1k1z1u tlSlaveXbar ( // @[HierarchicalElement.scala:56:41] .clock (clock), .reset (reset) ); // @[HierarchicalElement.scala:56:41] IntXbar_i4_o1 intXbar ( // @[HierarchicalElement.scala:57:37] .auto_anon_in_3_0 (x1_int_localOut_2_0), // @[MixedNode.scala:542:17] .auto_anon_in_2_0 (x1_int_localOut_1_0), // @[MixedNode.scala:542:17] .auto_anon_in_1_0 (x1_int_localOut_0), // @[MixedNode.scala:542:17] .auto_anon_in_1_1 (x1_int_localOut_1), // @[MixedNode.scala:542:17] .auto_anon_in_0_0 (int_localOut_0), // @[MixedNode.scala:542:17] .auto_anon_out_0 (intSinkNodeIn_0), .auto_anon_out_1 (intSinkNodeIn_1), .auto_anon_out_2 (intSinkNodeIn_2), .auto_anon_out_3 (intSinkNodeIn_3), .auto_anon_out_4 (intSinkNodeIn_4) ); // @[HierarchicalElement.scala:57:37] DCache dcache ( // @[HellaCache.scala:278:43] .clock (clock), .reset (reset), .auto_out_a_ready (widget_auto_anon_in_a_ready), // @[WidthWidget.scala:27:9] .auto_out_a_valid (widget_auto_anon_in_a_valid), .auto_out_a_bits_opcode (widget_auto_anon_in_a_bits_opcode), .auto_out_a_bits_param (widget_auto_anon_in_a_bits_param), .auto_out_a_bits_size (widget_auto_anon_in_a_bits_size), .auto_out_a_bits_source (widget_auto_anon_in_a_bits_source), .auto_out_a_bits_address (widget_auto_anon_in_a_bits_address), .auto_out_a_bits_mask (widget_auto_anon_in_a_bits_mask), .auto_out_a_bits_data (widget_auto_anon_in_a_bits_data), .auto_out_b_ready 
(widget_auto_anon_in_b_ready), .auto_out_b_valid (widget_auto_anon_in_b_valid), // @[WidthWidget.scala:27:9] .auto_out_b_bits_opcode (widget_auto_anon_in_b_bits_opcode), // @[WidthWidget.scala:27:9] .auto_out_b_bits_param (widget_auto_anon_in_b_bits_param), // @[WidthWidget.scala:27:9] .auto_out_b_bits_size (widget_auto_anon_in_b_bits_size), // @[WidthWidget.scala:27:9] .auto_out_b_bits_source (widget_auto_anon_in_b_bits_source), // @[WidthWidget.scala:27:9] .auto_out_b_bits_address (widget_auto_anon_in_b_bits_address), // @[WidthWidget.scala:27:9] .auto_out_b_bits_mask (widget_auto_anon_in_b_bits_mask), // @[WidthWidget.scala:27:9] .auto_out_b_bits_data (widget_auto_anon_in_b_bits_data), // @[WidthWidget.scala:27:9] .auto_out_b_bits_corrupt (widget_auto_anon_in_b_bits_corrupt), // @[WidthWidget.scala:27:9] .auto_out_c_ready (widget_auto_anon_in_c_ready), // @[WidthWidget.scala:27:9] .auto_out_c_valid (widget_auto_anon_in_c_valid), .auto_out_c_bits_opcode (widget_auto_anon_in_c_bits_opcode), .auto_out_c_bits_param (widget_auto_anon_in_c_bits_param), .auto_out_c_bits_size (widget_auto_anon_in_c_bits_size), .auto_out_c_bits_source (widget_auto_anon_in_c_bits_source), .auto_out_c_bits_address (widget_auto_anon_in_c_bits_address), .auto_out_c_bits_data (widget_auto_anon_in_c_bits_data), .auto_out_d_ready (widget_auto_anon_in_d_ready), .auto_out_d_valid (widget_auto_anon_in_d_valid), // @[WidthWidget.scala:27:9] .auto_out_d_bits_opcode (widget_auto_anon_in_d_bits_opcode), // @[WidthWidget.scala:27:9] .auto_out_d_bits_param (widget_auto_anon_in_d_bits_param), // @[WidthWidget.scala:27:9] .auto_out_d_bits_size (widget_auto_anon_in_d_bits_size), // @[WidthWidget.scala:27:9] .auto_out_d_bits_source (widget_auto_anon_in_d_bits_source), // @[WidthWidget.scala:27:9] .auto_out_d_bits_sink (widget_auto_anon_in_d_bits_sink), // @[WidthWidget.scala:27:9] .auto_out_d_bits_denied (widget_auto_anon_in_d_bits_denied), // @[WidthWidget.scala:27:9] .auto_out_d_bits_data (widget_auto_anon_in_d_bits_data), // @[WidthWidget.scala:27:9] .auto_out_d_bits_corrupt (widget_auto_anon_in_d_bits_corrupt), // @[WidthWidget.scala:27:9] .auto_out_e_ready (widget_auto_anon_in_e_ready), // @[WidthWidget.scala:27:9] .auto_out_e_valid (widget_auto_anon_in_e_valid), .auto_out_e_bits_sink (widget_auto_anon_in_e_bits_sink), .io_cpu_req_ready (_dcache_io_cpu_req_ready), .io_cpu_req_valid (_dcacheArb_io_mem_req_valid), // @[HellaCache.scala:292:25] .io_cpu_req_bits_addr (_dcacheArb_io_mem_req_bits_addr), // @[HellaCache.scala:292:25] .io_cpu_req_bits_tag (_dcacheArb_io_mem_req_bits_tag), // @[HellaCache.scala:292:25] .io_cpu_req_bits_cmd (_dcacheArb_io_mem_req_bits_cmd), // @[HellaCache.scala:292:25] .io_cpu_req_bits_size (_dcacheArb_io_mem_req_bits_size), // @[HellaCache.scala:292:25] .io_cpu_req_bits_signed (_dcacheArb_io_mem_req_bits_signed), // @[HellaCache.scala:292:25] .io_cpu_req_bits_dprv (_dcacheArb_io_mem_req_bits_dprv), // @[HellaCache.scala:292:25] .io_cpu_req_bits_dv (_dcacheArb_io_mem_req_bits_dv), // @[HellaCache.scala:292:25] .io_cpu_req_bits_phys (_dcacheArb_io_mem_req_bits_phys), // @[HellaCache.scala:292:25] .io_cpu_req_bits_no_resp (_dcacheArb_io_mem_req_bits_no_resp), // @[HellaCache.scala:292:25] .io_cpu_s1_kill (_dcacheArb_io_mem_s1_kill), // @[HellaCache.scala:292:25] .io_cpu_s1_data_data (_dcacheArb_io_mem_s1_data_data), // @[HellaCache.scala:292:25] .io_cpu_s1_data_mask (_dcacheArb_io_mem_s1_data_mask), // @[HellaCache.scala:292:25] .io_cpu_s2_nack (_dcache_io_cpu_s2_nack), .io_cpu_s2_nack_cause_raw 
(_dcache_io_cpu_s2_nack_cause_raw), .io_cpu_s2_uncached (_dcache_io_cpu_s2_uncached), .io_cpu_s2_paddr (_dcache_io_cpu_s2_paddr), .io_cpu_resp_valid (_dcache_io_cpu_resp_valid), .io_cpu_resp_bits_addr (_dcache_io_cpu_resp_bits_addr), .io_cpu_resp_bits_tag (_dcache_io_cpu_resp_bits_tag), .io_cpu_resp_bits_cmd (_dcache_io_cpu_resp_bits_cmd), .io_cpu_resp_bits_size (_dcache_io_cpu_resp_bits_size), .io_cpu_resp_bits_signed (_dcache_io_cpu_resp_bits_signed), .io_cpu_resp_bits_dprv (_dcache_io_cpu_resp_bits_dprv), .io_cpu_resp_bits_dv (_dcache_io_cpu_resp_bits_dv), .io_cpu_resp_bits_data (_dcache_io_cpu_resp_bits_data), .io_cpu_resp_bits_mask (_dcache_io_cpu_resp_bits_mask), .io_cpu_resp_bits_replay (_dcache_io_cpu_resp_bits_replay), .io_cpu_resp_bits_has_data (_dcache_io_cpu_resp_bits_has_data), .io_cpu_resp_bits_data_word_bypass (_dcache_io_cpu_resp_bits_data_word_bypass), .io_cpu_resp_bits_data_raw (_dcache_io_cpu_resp_bits_data_raw), .io_cpu_resp_bits_store_data (_dcache_io_cpu_resp_bits_store_data), .io_cpu_replay_next (_dcache_io_cpu_replay_next), .io_cpu_s2_xcpt_ma_ld (_dcache_io_cpu_s2_xcpt_ma_ld), .io_cpu_s2_xcpt_ma_st (_dcache_io_cpu_s2_xcpt_ma_st), .io_cpu_s2_xcpt_pf_ld (_dcache_io_cpu_s2_xcpt_pf_ld), .io_cpu_s2_xcpt_pf_st (_dcache_io_cpu_s2_xcpt_pf_st), .io_cpu_s2_xcpt_ae_ld (_dcache_io_cpu_s2_xcpt_ae_ld), .io_cpu_s2_xcpt_ae_st (_dcache_io_cpu_s2_xcpt_ae_st), .io_cpu_s2_gpa (_dcache_io_cpu_s2_gpa), .io_cpu_ordered (_dcache_io_cpu_ordered), .io_cpu_store_pending (_dcache_io_cpu_store_pending), .io_cpu_perf_acquire (_dcache_io_cpu_perf_acquire), .io_cpu_perf_release (_dcache_io_cpu_perf_release), .io_cpu_perf_grant (_dcache_io_cpu_perf_grant), .io_cpu_perf_tlbMiss (_dcache_io_cpu_perf_tlbMiss), .io_cpu_perf_blocked (_dcache_io_cpu_perf_blocked), .io_cpu_perf_canAcceptStoreThenLoad (_dcache_io_cpu_perf_canAcceptStoreThenLoad), .io_cpu_perf_canAcceptStoreThenRMW (_dcache_io_cpu_perf_canAcceptStoreThenRMW), .io_cpu_perf_canAcceptLoadThenLoad (_dcache_io_cpu_perf_canAcceptLoadThenLoad), .io_cpu_perf_storeBufferEmptyAfterLoad (_dcache_io_cpu_perf_storeBufferEmptyAfterLoad), .io_cpu_perf_storeBufferEmptyAfterStore (_dcache_io_cpu_perf_storeBufferEmptyAfterStore), .io_cpu_keep_clock_enabled (_dcacheArb_io_mem_keep_clock_enabled), // @[HellaCache.scala:292:25] .io_ptw_req_ready (_ptw_io_requestor_2_req_ready), // @[PTW.scala:802:19] .io_ptw_req_valid (_dcache_io_ptw_req_valid), .io_ptw_req_bits_bits_addr (_dcache_io_ptw_req_bits_bits_addr), .io_ptw_req_bits_bits_need_gpa (_dcache_io_ptw_req_bits_bits_need_gpa), .io_ptw_resp_valid (_ptw_io_requestor_2_resp_valid), // @[PTW.scala:802:19] .io_ptw_resp_bits_ae_ptw (_ptw_io_requestor_2_resp_bits_ae_ptw), // @[PTW.scala:802:19] .io_ptw_resp_bits_ae_final (_ptw_io_requestor_2_resp_bits_ae_final), // @[PTW.scala:802:19] .io_ptw_resp_bits_pf (_ptw_io_requestor_2_resp_bits_pf), // @[PTW.scala:802:19] .io_ptw_resp_bits_gf (_ptw_io_requestor_2_resp_bits_gf), // @[PTW.scala:802:19] .io_ptw_resp_bits_hr (_ptw_io_requestor_2_resp_bits_hr), // @[PTW.scala:802:19] .io_ptw_resp_bits_hw (_ptw_io_requestor_2_resp_bits_hw), // @[PTW.scala:802:19] .io_ptw_resp_bits_hx (_ptw_io_requestor_2_resp_bits_hx), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_reserved_for_future (_ptw_io_requestor_2_resp_bits_pte_reserved_for_future), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_ppn (_ptw_io_requestor_2_resp_bits_pte_ppn), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_reserved_for_software (_ptw_io_requestor_2_resp_bits_pte_reserved_for_software), // @[PTW.scala:802:19] 
.io_ptw_resp_bits_pte_d (_ptw_io_requestor_2_resp_bits_pte_d), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_a (_ptw_io_requestor_2_resp_bits_pte_a), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_g (_ptw_io_requestor_2_resp_bits_pte_g), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_u (_ptw_io_requestor_2_resp_bits_pte_u), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_x (_ptw_io_requestor_2_resp_bits_pte_x), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_w (_ptw_io_requestor_2_resp_bits_pte_w), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_r (_ptw_io_requestor_2_resp_bits_pte_r), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_v (_ptw_io_requestor_2_resp_bits_pte_v), // @[PTW.scala:802:19] .io_ptw_resp_bits_level (_ptw_io_requestor_2_resp_bits_level), // @[PTW.scala:802:19] .io_ptw_resp_bits_homogeneous (_ptw_io_requestor_2_resp_bits_homogeneous), // @[PTW.scala:802:19] .io_ptw_resp_bits_gpa_valid (_ptw_io_requestor_2_resp_bits_gpa_valid), // @[PTW.scala:802:19] .io_ptw_resp_bits_gpa_bits (_ptw_io_requestor_2_resp_bits_gpa_bits), // @[PTW.scala:802:19] .io_ptw_resp_bits_gpa_is_pte (_ptw_io_requestor_2_resp_bits_gpa_is_pte), // @[PTW.scala:802:19] .io_ptw_ptbr_mode (_ptw_io_requestor_2_ptbr_mode), // @[PTW.scala:802:19] .io_ptw_ptbr_ppn (_ptw_io_requestor_2_ptbr_ppn), // @[PTW.scala:802:19] .io_ptw_status_debug (_ptw_io_requestor_2_status_debug), // @[PTW.scala:802:19] .io_ptw_status_cease (_ptw_io_requestor_2_status_cease), // @[PTW.scala:802:19] .io_ptw_status_wfi (_ptw_io_requestor_2_status_wfi), // @[PTW.scala:802:19] .io_ptw_status_isa (_ptw_io_requestor_2_status_isa), // @[PTW.scala:802:19] .io_ptw_status_dprv (_ptw_io_requestor_2_status_dprv), // @[PTW.scala:802:19] .io_ptw_status_dv (_ptw_io_requestor_2_status_dv), // @[PTW.scala:802:19] .io_ptw_status_prv (_ptw_io_requestor_2_status_prv), // @[PTW.scala:802:19] .io_ptw_status_v (_ptw_io_requestor_2_status_v), // @[PTW.scala:802:19] .io_ptw_status_mpv (_ptw_io_requestor_2_status_mpv), // @[PTW.scala:802:19] .io_ptw_status_gva (_ptw_io_requestor_2_status_gva), // @[PTW.scala:802:19] .io_ptw_status_tsr (_ptw_io_requestor_2_status_tsr), // @[PTW.scala:802:19] .io_ptw_status_tw (_ptw_io_requestor_2_status_tw), // @[PTW.scala:802:19] .io_ptw_status_tvm (_ptw_io_requestor_2_status_tvm), // @[PTW.scala:802:19] .io_ptw_status_mxr (_ptw_io_requestor_2_status_mxr), // @[PTW.scala:802:19] .io_ptw_status_sum (_ptw_io_requestor_2_status_sum), // @[PTW.scala:802:19] .io_ptw_status_mprv (_ptw_io_requestor_2_status_mprv), // @[PTW.scala:802:19] .io_ptw_status_fs (_ptw_io_requestor_2_status_fs), // @[PTW.scala:802:19] .io_ptw_status_mpp (_ptw_io_requestor_2_status_mpp), // @[PTW.scala:802:19] .io_ptw_status_spp (_ptw_io_requestor_2_status_spp), // @[PTW.scala:802:19] .io_ptw_status_mpie (_ptw_io_requestor_2_status_mpie), // @[PTW.scala:802:19] .io_ptw_status_spie (_ptw_io_requestor_2_status_spie), // @[PTW.scala:802:19] .io_ptw_status_mie (_ptw_io_requestor_2_status_mie), // @[PTW.scala:802:19] .io_ptw_status_sie (_ptw_io_requestor_2_status_sie), // @[PTW.scala:802:19] .io_ptw_hstatus_spvp (_ptw_io_requestor_2_hstatus_spvp), // @[PTW.scala:802:19] .io_ptw_hstatus_spv (_ptw_io_requestor_2_hstatus_spv), // @[PTW.scala:802:19] .io_ptw_hstatus_gva (_ptw_io_requestor_2_hstatus_gva), // @[PTW.scala:802:19] .io_ptw_gstatus_debug (_ptw_io_requestor_2_gstatus_debug), // @[PTW.scala:802:19] .io_ptw_gstatus_cease (_ptw_io_requestor_2_gstatus_cease), // @[PTW.scala:802:19] .io_ptw_gstatus_wfi (_ptw_io_requestor_2_gstatus_wfi), // @[PTW.scala:802:19] .io_ptw_gstatus_isa 
(_ptw_io_requestor_2_gstatus_isa), // @[PTW.scala:802:19] .io_ptw_gstatus_dprv (_ptw_io_requestor_2_gstatus_dprv), // @[PTW.scala:802:19] .io_ptw_gstatus_dv (_ptw_io_requestor_2_gstatus_dv), // @[PTW.scala:802:19] .io_ptw_gstatus_prv (_ptw_io_requestor_2_gstatus_prv), // @[PTW.scala:802:19] .io_ptw_gstatus_v (_ptw_io_requestor_2_gstatus_v), // @[PTW.scala:802:19] .io_ptw_gstatus_zero2 (_ptw_io_requestor_2_gstatus_zero2), // @[PTW.scala:802:19] .io_ptw_gstatus_mpv (_ptw_io_requestor_2_gstatus_mpv), // @[PTW.scala:802:19] .io_ptw_gstatus_gva (_ptw_io_requestor_2_gstatus_gva), // @[PTW.scala:802:19] .io_ptw_gstatus_mbe (_ptw_io_requestor_2_gstatus_mbe), // @[PTW.scala:802:19] .io_ptw_gstatus_sbe (_ptw_io_requestor_2_gstatus_sbe), // @[PTW.scala:802:19] .io_ptw_gstatus_sxl (_ptw_io_requestor_2_gstatus_sxl), // @[PTW.scala:802:19] .io_ptw_gstatus_zero1 (_ptw_io_requestor_2_gstatus_zero1), // @[PTW.scala:802:19] .io_ptw_gstatus_tsr (_ptw_io_requestor_2_gstatus_tsr), // @[PTW.scala:802:19] .io_ptw_gstatus_tw (_ptw_io_requestor_2_gstatus_tw), // @[PTW.scala:802:19] .io_ptw_gstatus_tvm (_ptw_io_requestor_2_gstatus_tvm), // @[PTW.scala:802:19] .io_ptw_gstatus_mxr (_ptw_io_requestor_2_gstatus_mxr), // @[PTW.scala:802:19] .io_ptw_gstatus_sum (_ptw_io_requestor_2_gstatus_sum), // @[PTW.scala:802:19] .io_ptw_gstatus_mprv (_ptw_io_requestor_2_gstatus_mprv), // @[PTW.scala:802:19] .io_ptw_gstatus_fs (_ptw_io_requestor_2_gstatus_fs), // @[PTW.scala:802:19] .io_ptw_gstatus_mpp (_ptw_io_requestor_2_gstatus_mpp), // @[PTW.scala:802:19] .io_ptw_gstatus_vs (_ptw_io_requestor_2_gstatus_vs), // @[PTW.scala:802:19] .io_ptw_gstatus_spp (_ptw_io_requestor_2_gstatus_spp), // @[PTW.scala:802:19] .io_ptw_gstatus_mpie (_ptw_io_requestor_2_gstatus_mpie), // @[PTW.scala:802:19] .io_ptw_gstatus_ube (_ptw_io_requestor_2_gstatus_ube), // @[PTW.scala:802:19] .io_ptw_gstatus_spie (_ptw_io_requestor_2_gstatus_spie), // @[PTW.scala:802:19] .io_ptw_gstatus_upie (_ptw_io_requestor_2_gstatus_upie), // @[PTW.scala:802:19] .io_ptw_gstatus_mie (_ptw_io_requestor_2_gstatus_mie), // @[PTW.scala:802:19] .io_ptw_gstatus_hie (_ptw_io_requestor_2_gstatus_hie), // @[PTW.scala:802:19] .io_ptw_gstatus_sie (_ptw_io_requestor_2_gstatus_sie), // @[PTW.scala:802:19] .io_ptw_gstatus_uie (_ptw_io_requestor_2_gstatus_uie), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_l (_ptw_io_requestor_2_pmp_0_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_a (_ptw_io_requestor_2_pmp_0_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_x (_ptw_io_requestor_2_pmp_0_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_w (_ptw_io_requestor_2_pmp_0_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_r (_ptw_io_requestor_2_pmp_0_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_0_addr (_ptw_io_requestor_2_pmp_0_addr), // @[PTW.scala:802:19] .io_ptw_pmp_0_mask (_ptw_io_requestor_2_pmp_0_mask), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_l (_ptw_io_requestor_2_pmp_1_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_a (_ptw_io_requestor_2_pmp_1_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_x (_ptw_io_requestor_2_pmp_1_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_w (_ptw_io_requestor_2_pmp_1_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_r (_ptw_io_requestor_2_pmp_1_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_1_addr (_ptw_io_requestor_2_pmp_1_addr), // @[PTW.scala:802:19] .io_ptw_pmp_1_mask (_ptw_io_requestor_2_pmp_1_mask), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_l (_ptw_io_requestor_2_pmp_2_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_a (_ptw_io_requestor_2_pmp_2_cfg_a), // 
@[PTW.scala:802:19] .io_ptw_pmp_2_cfg_x (_ptw_io_requestor_2_pmp_2_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_w (_ptw_io_requestor_2_pmp_2_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_r (_ptw_io_requestor_2_pmp_2_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_2_addr (_ptw_io_requestor_2_pmp_2_addr), // @[PTW.scala:802:19] .io_ptw_pmp_2_mask (_ptw_io_requestor_2_pmp_2_mask), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_l (_ptw_io_requestor_2_pmp_3_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_a (_ptw_io_requestor_2_pmp_3_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_x (_ptw_io_requestor_2_pmp_3_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_w (_ptw_io_requestor_2_pmp_3_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_r (_ptw_io_requestor_2_pmp_3_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_3_addr (_ptw_io_requestor_2_pmp_3_addr), // @[PTW.scala:802:19] .io_ptw_pmp_3_mask (_ptw_io_requestor_2_pmp_3_mask), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_l (_ptw_io_requestor_2_pmp_4_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_a (_ptw_io_requestor_2_pmp_4_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_x (_ptw_io_requestor_2_pmp_4_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_w (_ptw_io_requestor_2_pmp_4_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_r (_ptw_io_requestor_2_pmp_4_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_4_addr (_ptw_io_requestor_2_pmp_4_addr), // @[PTW.scala:802:19] .io_ptw_pmp_4_mask (_ptw_io_requestor_2_pmp_4_mask), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_l (_ptw_io_requestor_2_pmp_5_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_a (_ptw_io_requestor_2_pmp_5_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_x (_ptw_io_requestor_2_pmp_5_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_w (_ptw_io_requestor_2_pmp_5_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_r (_ptw_io_requestor_2_pmp_5_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_5_addr (_ptw_io_requestor_2_pmp_5_addr), // @[PTW.scala:802:19] .io_ptw_pmp_5_mask (_ptw_io_requestor_2_pmp_5_mask), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_l (_ptw_io_requestor_2_pmp_6_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_a (_ptw_io_requestor_2_pmp_6_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_x (_ptw_io_requestor_2_pmp_6_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_w (_ptw_io_requestor_2_pmp_6_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_r (_ptw_io_requestor_2_pmp_6_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_6_addr (_ptw_io_requestor_2_pmp_6_addr), // @[PTW.scala:802:19] .io_ptw_pmp_6_mask (_ptw_io_requestor_2_pmp_6_mask), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_l (_ptw_io_requestor_2_pmp_7_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_a (_ptw_io_requestor_2_pmp_7_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_x (_ptw_io_requestor_2_pmp_7_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_w (_ptw_io_requestor_2_pmp_7_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_r (_ptw_io_requestor_2_pmp_7_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_7_addr (_ptw_io_requestor_2_pmp_7_addr), // @[PTW.scala:802:19] .io_ptw_pmp_7_mask (_ptw_io_requestor_2_pmp_7_mask), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_ren (_ptw_io_requestor_2_customCSRs_csrs_0_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_wen (_ptw_io_requestor_2_customCSRs_csrs_0_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_wdata (_ptw_io_requestor_2_customCSRs_csrs_0_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_value (_ptw_io_requestor_2_customCSRs_csrs_0_value), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_ren 
(_ptw_io_requestor_2_customCSRs_csrs_1_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_wen (_ptw_io_requestor_2_customCSRs_csrs_1_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_wdata (_ptw_io_requestor_2_customCSRs_csrs_1_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_value (_ptw_io_requestor_2_customCSRs_csrs_1_value), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_ren (_ptw_io_requestor_2_customCSRs_csrs_2_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_wen (_ptw_io_requestor_2_customCSRs_csrs_2_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_wdata (_ptw_io_requestor_2_customCSRs_csrs_2_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_value (_ptw_io_requestor_2_customCSRs_csrs_2_value), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_ren (_ptw_io_requestor_2_customCSRs_csrs_3_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_wen (_ptw_io_requestor_2_customCSRs_csrs_3_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_wdata (_ptw_io_requestor_2_customCSRs_csrs_3_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_value (_ptw_io_requestor_2_customCSRs_csrs_3_value) // @[PTW.scala:802:19] ); // @[HellaCache.scala:278:43] ReRoCCClient rerocc_client ( // @[Configs.scala:14:35] .clock (clock), .reset (reset), .auto_re_ro_cc_out_req_ready (_rerocc_buffer_auto_in_req_ready), // @[Protocol.scala:134:35] .auto_re_ro_cc_out_req_valid (_rerocc_client_auto_re_ro_cc_out_req_valid), .auto_re_ro_cc_out_req_bits_opcode (_rerocc_client_auto_re_ro_cc_out_req_bits_opcode), .auto_re_ro_cc_out_req_bits_client_id (_rerocc_client_auto_re_ro_cc_out_req_bits_client_id), .auto_re_ro_cc_out_req_bits_manager_id (_rerocc_client_auto_re_ro_cc_out_req_bits_manager_id), .auto_re_ro_cc_out_req_bits_data (_rerocc_client_auto_re_ro_cc_out_req_bits_data), .auto_re_ro_cc_out_resp_ready (_rerocc_client_auto_re_ro_cc_out_resp_ready), .auto_re_ro_cc_out_resp_valid (_rerocc_buffer_auto_in_resp_valid), // @[Protocol.scala:134:35] .auto_re_ro_cc_out_resp_bits_opcode (_rerocc_buffer_auto_in_resp_bits_opcode), // @[Protocol.scala:134:35] .auto_re_ro_cc_out_resp_bits_client_id (_rerocc_buffer_auto_in_resp_bits_client_id), // @[Protocol.scala:134:35] .auto_re_ro_cc_out_resp_bits_manager_id (_rerocc_buffer_auto_in_resp_bits_manager_id), // @[Protocol.scala:134:35] .auto_re_ro_cc_out_resp_bits_data (_rerocc_buffer_auto_in_resp_bits_data), // @[Protocol.scala:134:35] .io_cmd_ready (_rerocc_client_io_cmd_ready), .io_cmd_valid (_cmdRouter_io_out_0_valid), // @[LazyRoCC.scala:102:27] .io_cmd_bits_inst_funct (_cmdRouter_io_out_0_bits_inst_funct), // @[LazyRoCC.scala:102:27] .io_cmd_bits_inst_rs2 (_cmdRouter_io_out_0_bits_inst_rs2), // @[LazyRoCC.scala:102:27] .io_cmd_bits_inst_rs1 (_cmdRouter_io_out_0_bits_inst_rs1), // @[LazyRoCC.scala:102:27] .io_cmd_bits_inst_xd (_cmdRouter_io_out_0_bits_inst_xd), // @[LazyRoCC.scala:102:27] .io_cmd_bits_inst_xs1 (_cmdRouter_io_out_0_bits_inst_xs1), // @[LazyRoCC.scala:102:27] .io_cmd_bits_inst_xs2 (_cmdRouter_io_out_0_bits_inst_xs2), // @[LazyRoCC.scala:102:27] .io_cmd_bits_inst_rd (_cmdRouter_io_out_0_bits_inst_rd), // @[LazyRoCC.scala:102:27] .io_cmd_bits_inst_opcode (_cmdRouter_io_out_0_bits_inst_opcode), // @[LazyRoCC.scala:102:27] .io_cmd_bits_rs1 (_cmdRouter_io_out_0_bits_rs1), // @[LazyRoCC.scala:102:27] .io_cmd_bits_rs2 (_cmdRouter_io_out_0_bits_rs2), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_debug (_cmdRouter_io_out_0_bits_status_debug), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_cease 
(_cmdRouter_io_out_0_bits_status_cease), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_wfi (_cmdRouter_io_out_0_bits_status_wfi), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_isa (_cmdRouter_io_out_0_bits_status_isa), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_dprv (_cmdRouter_io_out_0_bits_status_dprv), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_dv (_cmdRouter_io_out_0_bits_status_dv), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_prv (_cmdRouter_io_out_0_bits_status_prv), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_v (_cmdRouter_io_out_0_bits_status_v), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_sd (_cmdRouter_io_out_0_bits_status_sd), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_zero2 (_cmdRouter_io_out_0_bits_status_zero2), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_mpv (_cmdRouter_io_out_0_bits_status_mpv), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_gva (_cmdRouter_io_out_0_bits_status_gva), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_mbe (_cmdRouter_io_out_0_bits_status_mbe), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_sbe (_cmdRouter_io_out_0_bits_status_sbe), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_sxl (_cmdRouter_io_out_0_bits_status_sxl), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_uxl (_cmdRouter_io_out_0_bits_status_uxl), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_sd_rv32 (_cmdRouter_io_out_0_bits_status_sd_rv32), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_zero1 (_cmdRouter_io_out_0_bits_status_zero1), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_tsr (_cmdRouter_io_out_0_bits_status_tsr), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_tw (_cmdRouter_io_out_0_bits_status_tw), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_tvm (_cmdRouter_io_out_0_bits_status_tvm), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_mxr (_cmdRouter_io_out_0_bits_status_mxr), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_sum (_cmdRouter_io_out_0_bits_status_sum), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_mprv (_cmdRouter_io_out_0_bits_status_mprv), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_xs (_cmdRouter_io_out_0_bits_status_xs), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_fs (_cmdRouter_io_out_0_bits_status_fs), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_mpp (_cmdRouter_io_out_0_bits_status_mpp), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_vs (_cmdRouter_io_out_0_bits_status_vs), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_spp (_cmdRouter_io_out_0_bits_status_spp), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_mpie (_cmdRouter_io_out_0_bits_status_mpie), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_ube (_cmdRouter_io_out_0_bits_status_ube), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_spie (_cmdRouter_io_out_0_bits_status_spie), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_upie (_cmdRouter_io_out_0_bits_status_upie), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_mie (_cmdRouter_io_out_0_bits_status_mie), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_hie (_cmdRouter_io_out_0_bits_status_hie), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_sie (_cmdRouter_io_out_0_bits_status_sie), // @[LazyRoCC.scala:102:27] .io_cmd_bits_status_uie (_cmdRouter_io_out_0_bits_status_uie), // @[LazyRoCC.scala:102:27] .io_resp_ready (_respArb_io_in_0_q_io_enq_ready), // @[Decoupled.scala:362:21] .io_resp_valid (_rerocc_client_io_resp_valid), .io_resp_bits_rd (_rerocc_client_io_resp_bits_rd), .io_resp_bits_data (_rerocc_client_io_resp_bits_data), .io_mem_req_ready (_dcIF_io_requestor_req_ready), // 
@[LazyRoCC.scala:106:24] .io_mem_resp_valid (_dcIF_io_requestor_resp_valid), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_addr (_dcIF_io_requestor_resp_bits_addr), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_tag (_dcIF_io_requestor_resp_bits_tag), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_cmd (_dcIF_io_requestor_resp_bits_cmd), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_size (_dcIF_io_requestor_resp_bits_size), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_signed (_dcIF_io_requestor_resp_bits_signed), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_dprv (_dcIF_io_requestor_resp_bits_dprv), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_dv (_dcIF_io_requestor_resp_bits_dv), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_data (_dcIF_io_requestor_resp_bits_data), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_mask (_dcIF_io_requestor_resp_bits_mask), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_replay (_dcIF_io_requestor_resp_bits_replay), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_has_data (_dcIF_io_requestor_resp_bits_has_data), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_data_word_bypass (_dcIF_io_requestor_resp_bits_data_word_bypass), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_data_raw (_dcIF_io_requestor_resp_bits_data_raw), // @[LazyRoCC.scala:106:24] .io_mem_resp_bits_store_data (_dcIF_io_requestor_resp_bits_store_data), // @[LazyRoCC.scala:106:24] .io_busy (_rerocc_client_io_busy), .io_exception (_core_io_rocc_exception), // @[RocketTile.scala:147:20] .io_csrs_0_ren (_core_io_rocc_csrs_0_ren), // @[RocketTile.scala:147:20] .io_csrs_0_wen (_core_io_rocc_csrs_0_wen), // @[RocketTile.scala:147:20] .io_csrs_0_wdata (_core_io_rocc_csrs_0_wdata), // @[RocketTile.scala:147:20] .io_csrs_0_value (_core_io_rocc_csrs_0_value), // @[RocketTile.scala:147:20] .io_csrs_0_sdata (_rerocc_client_io_csrs_0_sdata), .io_csrs_1_ren (_core_io_rocc_csrs_1_ren), // @[RocketTile.scala:147:20] .io_csrs_1_wen (_core_io_rocc_csrs_1_wen), // @[RocketTile.scala:147:20] .io_csrs_1_wdata (_core_io_rocc_csrs_1_wdata), // @[RocketTile.scala:147:20] .io_csrs_1_value (_core_io_rocc_csrs_1_value), // @[RocketTile.scala:147:20] .io_csrs_1_sdata (_rerocc_client_io_csrs_1_sdata), .io_csrs_2_ren (_core_io_rocc_csrs_2_ren), // @[RocketTile.scala:147:20] .io_csrs_2_wen (_core_io_rocc_csrs_2_wen), // @[RocketTile.scala:147:20] .io_csrs_2_wdata (_core_io_rocc_csrs_2_wdata), // @[RocketTile.scala:147:20] .io_csrs_2_value (_core_io_rocc_csrs_2_value), // @[RocketTile.scala:147:20] .io_csrs_2_sdata (_rerocc_client_io_csrs_2_sdata), .io_csrs_3_ren (_core_io_rocc_csrs_3_ren), // @[RocketTile.scala:147:20] .io_csrs_3_wen (_core_io_rocc_csrs_3_wen), // @[RocketTile.scala:147:20] .io_csrs_3_wdata (_core_io_rocc_csrs_3_wdata), // @[RocketTile.scala:147:20] .io_csrs_3_value (_core_io_rocc_csrs_3_value), // @[RocketTile.scala:147:20] .io_csrs_3_sdata (_rerocc_client_io_csrs_3_sdata), .io_csrs_4_ren (_core_io_rocc_csrs_4_ren), // @[RocketTile.scala:147:20] .io_csrs_4_wen (_core_io_rocc_csrs_4_wen), // @[RocketTile.scala:147:20] .io_csrs_4_wdata (_core_io_rocc_csrs_4_wdata), // @[RocketTile.scala:147:20] .io_csrs_4_value (_core_io_rocc_csrs_4_value), // @[RocketTile.scala:147:20] .io_csrs_5_ren (_core_io_rocc_csrs_5_ren), // @[RocketTile.scala:147:20] .io_csrs_5_wen (_core_io_rocc_csrs_5_wen), // @[RocketTile.scala:147:20] .io_csrs_5_wdata (_core_io_rocc_csrs_5_wdata), // @[RocketTile.scala:147:20] .io_csrs_5_value (_core_io_rocc_csrs_5_value), // @[RocketTile.scala:147:20] .io_csrs_5_stall (_rerocc_client_io_csrs_5_stall), 
.io_csrs_5_sdata (_rerocc_client_io_csrs_5_sdata), .io_csrs_6_ren (_core_io_rocc_csrs_6_ren), // @[RocketTile.scala:147:20] .io_csrs_6_wen (_core_io_rocc_csrs_6_wen), // @[RocketTile.scala:147:20] .io_csrs_6_wdata (_core_io_rocc_csrs_6_wdata), // @[RocketTile.scala:147:20] .io_csrs_6_value (_core_io_rocc_csrs_6_value), // @[RocketTile.scala:147:20] .io_csrs_6_stall (_rerocc_client_io_csrs_6_stall), .io_csrs_6_sdata (_rerocc_client_io_csrs_6_sdata), .io_csrs_7_ren (_core_io_rocc_csrs_7_ren), // @[RocketTile.scala:147:20] .io_csrs_7_wen (_core_io_rocc_csrs_7_wen), // @[RocketTile.scala:147:20] .io_csrs_7_wdata (_core_io_rocc_csrs_7_wdata), // @[RocketTile.scala:147:20] .io_csrs_7_value (_core_io_rocc_csrs_7_value), // @[RocketTile.scala:147:20] .io_csrs_7_stall (_rerocc_client_io_csrs_7_stall), .io_csrs_7_sdata (_rerocc_client_io_csrs_7_sdata), .io_csrs_8_ren (_core_io_rocc_csrs_8_ren), // @[RocketTile.scala:147:20] .io_csrs_8_wen (_core_io_rocc_csrs_8_wen), // @[RocketTile.scala:147:20] .io_csrs_8_wdata (_core_io_rocc_csrs_8_wdata), // @[RocketTile.scala:147:20] .io_csrs_8_value (_core_io_rocc_csrs_8_value), // @[RocketTile.scala:147:20] .io_csrs_8_stall (_rerocc_client_io_csrs_8_stall), .io_csrs_8_sdata (_rerocc_client_io_csrs_8_sdata), .io_csrs_9_ren (_core_io_rocc_csrs_9_ren), // @[RocketTile.scala:147:20] .io_csrs_9_wen (_core_io_rocc_csrs_9_wen), // @[RocketTile.scala:147:20] .io_csrs_9_wdata (_core_io_rocc_csrs_9_wdata), // @[RocketTile.scala:147:20] .io_csrs_9_value (_core_io_rocc_csrs_9_value), // @[RocketTile.scala:147:20] .io_csrs_9_stall (_rerocc_client_io_csrs_9_stall), .io_csrs_9_sdata (_rerocc_client_io_csrs_9_sdata), .io_csrs_10_ren (_core_io_rocc_csrs_10_ren), // @[RocketTile.scala:147:20] .io_csrs_10_wen (_core_io_rocc_csrs_10_wen), // @[RocketTile.scala:147:20] .io_csrs_10_wdata (_core_io_rocc_csrs_10_wdata), // @[RocketTile.scala:147:20] .io_csrs_10_value (_core_io_rocc_csrs_10_value), // @[RocketTile.scala:147:20] .io_csrs_10_stall (_rerocc_client_io_csrs_10_stall), .io_csrs_10_sdata (_rerocc_client_io_csrs_10_sdata), .io_csrs_11_ren (_core_io_rocc_csrs_11_ren), // @[RocketTile.scala:147:20] .io_csrs_11_wen (_core_io_rocc_csrs_11_wen), // @[RocketTile.scala:147:20] .io_csrs_11_wdata (_core_io_rocc_csrs_11_wdata), // @[RocketTile.scala:147:20] .io_csrs_11_value (_core_io_rocc_csrs_11_value), // @[RocketTile.scala:147:20] .io_csrs_11_stall (_rerocc_client_io_csrs_11_stall), .io_csrs_11_sdata (_rerocc_client_io_csrs_11_sdata), .io_csrs_12_ren (_core_io_rocc_csrs_12_ren), // @[RocketTile.scala:147:20] .io_csrs_12_wen (_core_io_rocc_csrs_12_wen), // @[RocketTile.scala:147:20] .io_csrs_12_wdata (_core_io_rocc_csrs_12_wdata), // @[RocketTile.scala:147:20] .io_csrs_12_value (_core_io_rocc_csrs_12_value), // @[RocketTile.scala:147:20] .io_csrs_12_stall (_rerocc_client_io_csrs_12_stall), .io_csrs_12_sdata (_rerocc_client_io_csrs_12_sdata), .io_csrs_13_ren (_core_io_rocc_csrs_13_ren), // @[RocketTile.scala:147:20] .io_csrs_13_wen (_core_io_rocc_csrs_13_wen), // @[RocketTile.scala:147:20] .io_csrs_13_wdata (_core_io_rocc_csrs_13_wdata), // @[RocketTile.scala:147:20] .io_csrs_13_value (_core_io_rocc_csrs_13_value), // @[RocketTile.scala:147:20] .io_csrs_13_stall (_rerocc_client_io_csrs_13_stall), .io_csrs_13_sdata (_rerocc_client_io_csrs_13_sdata), .io_csrs_14_ren (_core_io_rocc_csrs_14_ren), // @[RocketTile.scala:147:20] .io_csrs_14_wen (_core_io_rocc_csrs_14_wen), // @[RocketTile.scala:147:20] .io_csrs_14_wdata (_core_io_rocc_csrs_14_wdata), // @[RocketTile.scala:147:20] 
.io_csrs_14_value (_core_io_rocc_csrs_14_value), // @[RocketTile.scala:147:20] .io_csrs_14_stall (_rerocc_client_io_csrs_14_stall), .io_csrs_14_sdata (_rerocc_client_io_csrs_14_sdata), .io_csrs_15_ren (_core_io_rocc_csrs_15_ren), // @[RocketTile.scala:147:20] .io_csrs_15_wen (_core_io_rocc_csrs_15_wen), // @[RocketTile.scala:147:20] .io_csrs_15_wdata (_core_io_rocc_csrs_15_wdata), // @[RocketTile.scala:147:20] .io_csrs_15_value (_core_io_rocc_csrs_15_value), // @[RocketTile.scala:147:20] .io_csrs_15_stall (_rerocc_client_io_csrs_15_stall), .io_csrs_15_sdata (_rerocc_client_io_csrs_15_sdata), .io_csrs_16_ren (_core_io_rocc_csrs_16_ren), // @[RocketTile.scala:147:20] .io_csrs_16_wen (_core_io_rocc_csrs_16_wen), // @[RocketTile.scala:147:20] .io_csrs_16_wdata (_core_io_rocc_csrs_16_wdata), // @[RocketTile.scala:147:20] .io_csrs_16_value (_core_io_rocc_csrs_16_value), // @[RocketTile.scala:147:20] .io_csrs_16_stall (_rerocc_client_io_csrs_16_stall), .io_csrs_16_sdata (_rerocc_client_io_csrs_16_sdata), .io_csrs_17_ren (_core_io_rocc_csrs_17_ren), // @[RocketTile.scala:147:20] .io_csrs_17_wen (_core_io_rocc_csrs_17_wen), // @[RocketTile.scala:147:20] .io_csrs_17_wdata (_core_io_rocc_csrs_17_wdata), // @[RocketTile.scala:147:20] .io_csrs_17_value (_core_io_rocc_csrs_17_value), // @[RocketTile.scala:147:20] .io_csrs_17_stall (_rerocc_client_io_csrs_17_stall), .io_csrs_17_sdata (_rerocc_client_io_csrs_17_sdata), .io_csrs_18_ren (_core_io_rocc_csrs_18_ren), // @[RocketTile.scala:147:20] .io_csrs_18_wen (_core_io_rocc_csrs_18_wen), // @[RocketTile.scala:147:20] .io_csrs_18_wdata (_core_io_rocc_csrs_18_wdata), // @[RocketTile.scala:147:20] .io_csrs_18_value (_core_io_rocc_csrs_18_value), // @[RocketTile.scala:147:20] .io_csrs_18_stall (_rerocc_client_io_csrs_18_stall), .io_csrs_18_sdata (_rerocc_client_io_csrs_18_sdata), .io_csrs_19_ren (_core_io_rocc_csrs_19_ren), // @[RocketTile.scala:147:20] .io_csrs_19_wen (_core_io_rocc_csrs_19_wen), // @[RocketTile.scala:147:20] .io_csrs_19_wdata (_core_io_rocc_csrs_19_wdata), // @[RocketTile.scala:147:20] .io_csrs_19_value (_core_io_rocc_csrs_19_value), // @[RocketTile.scala:147:20] .io_csrs_19_stall (_rerocc_client_io_csrs_19_stall), .io_csrs_19_sdata (_rerocc_client_io_csrs_19_sdata), .io_csrs_20_ren (_core_io_rocc_csrs_20_ren), // @[RocketTile.scala:147:20] .io_csrs_20_wen (_core_io_rocc_csrs_20_wen), // @[RocketTile.scala:147:20] .io_csrs_20_wdata (_core_io_rocc_csrs_20_wdata), // @[RocketTile.scala:147:20] .io_csrs_20_value (_core_io_rocc_csrs_20_value), // @[RocketTile.scala:147:20] .io_csrs_20_stall (_rerocc_client_io_csrs_20_stall), .io_csrs_20_sdata (_rerocc_client_io_csrs_20_sdata), .io_ptw_0_req_ready (_ptw_io_requestor_0_req_ready), // @[PTW.scala:802:19] .io_ptw_0_resp_valid (_ptw_io_requestor_0_resp_valid), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_ae_ptw (_ptw_io_requestor_0_resp_bits_ae_ptw), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_ae_final (_ptw_io_requestor_0_resp_bits_ae_final), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_pf (_ptw_io_requestor_0_resp_bits_pf), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_gf (_ptw_io_requestor_0_resp_bits_gf), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_hr (_ptw_io_requestor_0_resp_bits_hr), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_hw (_ptw_io_requestor_0_resp_bits_hw), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_hx (_ptw_io_requestor_0_resp_bits_hx), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_pte_reserved_for_future (_ptw_io_requestor_0_resp_bits_pte_reserved_for_future), // @[PTW.scala:802:19] 
.io_ptw_0_resp_bits_pte_ppn (_ptw_io_requestor_0_resp_bits_pte_ppn), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_pte_reserved_for_software (_ptw_io_requestor_0_resp_bits_pte_reserved_for_software), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_pte_d (_ptw_io_requestor_0_resp_bits_pte_d), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_pte_a (_ptw_io_requestor_0_resp_bits_pte_a), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_pte_g (_ptw_io_requestor_0_resp_bits_pte_g), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_pte_u (_ptw_io_requestor_0_resp_bits_pte_u), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_pte_x (_ptw_io_requestor_0_resp_bits_pte_x), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_pte_w (_ptw_io_requestor_0_resp_bits_pte_w), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_pte_r (_ptw_io_requestor_0_resp_bits_pte_r), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_pte_v (_ptw_io_requestor_0_resp_bits_pte_v), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_level (_ptw_io_requestor_0_resp_bits_level), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_homogeneous (_ptw_io_requestor_0_resp_bits_homogeneous), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_gpa_valid (_ptw_io_requestor_0_resp_bits_gpa_valid), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_gpa_bits (_ptw_io_requestor_0_resp_bits_gpa_bits), // @[PTW.scala:802:19] .io_ptw_0_resp_bits_gpa_is_pte (_ptw_io_requestor_0_resp_bits_gpa_is_pte), // @[PTW.scala:802:19] .io_ptw_0_ptbr_mode (_ptw_io_requestor_0_ptbr_mode), // @[PTW.scala:802:19] .io_ptw_0_ptbr_ppn (_ptw_io_requestor_0_ptbr_ppn), // @[PTW.scala:802:19] .io_ptw_0_status_debug (_ptw_io_requestor_0_status_debug), // @[PTW.scala:802:19] .io_ptw_0_status_cease (_ptw_io_requestor_0_status_cease), // @[PTW.scala:802:19] .io_ptw_0_status_wfi (_ptw_io_requestor_0_status_wfi), // @[PTW.scala:802:19] .io_ptw_0_status_isa (_ptw_io_requestor_0_status_isa), // @[PTW.scala:802:19] .io_ptw_0_status_dprv (_ptw_io_requestor_0_status_dprv), // @[PTW.scala:802:19] .io_ptw_0_status_dv (_ptw_io_requestor_0_status_dv), // @[PTW.scala:802:19] .io_ptw_0_status_prv (_ptw_io_requestor_0_status_prv), // @[PTW.scala:802:19] .io_ptw_0_status_v (_ptw_io_requestor_0_status_v), // @[PTW.scala:802:19] .io_ptw_0_status_mpv (_ptw_io_requestor_0_status_mpv), // @[PTW.scala:802:19] .io_ptw_0_status_gva (_ptw_io_requestor_0_status_gva), // @[PTW.scala:802:19] .io_ptw_0_status_tsr (_ptw_io_requestor_0_status_tsr), // @[PTW.scala:802:19] .io_ptw_0_status_tw (_ptw_io_requestor_0_status_tw), // @[PTW.scala:802:19] .io_ptw_0_status_tvm (_ptw_io_requestor_0_status_tvm), // @[PTW.scala:802:19] .io_ptw_0_status_mxr (_ptw_io_requestor_0_status_mxr), // @[PTW.scala:802:19] .io_ptw_0_status_sum (_ptw_io_requestor_0_status_sum), // @[PTW.scala:802:19] .io_ptw_0_status_mprv (_ptw_io_requestor_0_status_mprv), // @[PTW.scala:802:19] .io_ptw_0_status_fs (_ptw_io_requestor_0_status_fs), // @[PTW.scala:802:19] .io_ptw_0_status_mpp (_ptw_io_requestor_0_status_mpp), // @[PTW.scala:802:19] .io_ptw_0_status_spp (_ptw_io_requestor_0_status_spp), // @[PTW.scala:802:19] .io_ptw_0_status_mpie (_ptw_io_requestor_0_status_mpie), // @[PTW.scala:802:19] .io_ptw_0_status_spie (_ptw_io_requestor_0_status_spie), // @[PTW.scala:802:19] .io_ptw_0_status_mie (_ptw_io_requestor_0_status_mie), // @[PTW.scala:802:19] .io_ptw_0_status_sie (_ptw_io_requestor_0_status_sie), // @[PTW.scala:802:19] .io_ptw_0_hstatus_spvp (_ptw_io_requestor_0_hstatus_spvp), // @[PTW.scala:802:19] .io_ptw_0_hstatus_spv (_ptw_io_requestor_0_hstatus_spv), // @[PTW.scala:802:19] .io_ptw_0_hstatus_gva 
(_ptw_io_requestor_0_hstatus_gva), // @[PTW.scala:802:19] .io_ptw_0_gstatus_debug (_ptw_io_requestor_0_gstatus_debug), // @[PTW.scala:802:19] .io_ptw_0_gstatus_cease (_ptw_io_requestor_0_gstatus_cease), // @[PTW.scala:802:19] .io_ptw_0_gstatus_wfi (_ptw_io_requestor_0_gstatus_wfi), // @[PTW.scala:802:19] .io_ptw_0_gstatus_isa (_ptw_io_requestor_0_gstatus_isa), // @[PTW.scala:802:19] .io_ptw_0_gstatus_dprv (_ptw_io_requestor_0_gstatus_dprv), // @[PTW.scala:802:19] .io_ptw_0_gstatus_dv (_ptw_io_requestor_0_gstatus_dv), // @[PTW.scala:802:19] .io_ptw_0_gstatus_prv (_ptw_io_requestor_0_gstatus_prv), // @[PTW.scala:802:19] .io_ptw_0_gstatus_v (_ptw_io_requestor_0_gstatus_v), // @[PTW.scala:802:19] .io_ptw_0_gstatus_zero2 (_ptw_io_requestor_0_gstatus_zero2), // @[PTW.scala:802:19] .io_ptw_0_gstatus_mpv (_ptw_io_requestor_0_gstatus_mpv), // @[PTW.scala:802:19] .io_ptw_0_gstatus_gva (_ptw_io_requestor_0_gstatus_gva), // @[PTW.scala:802:19] .io_ptw_0_gstatus_mbe (_ptw_io_requestor_0_gstatus_mbe), // @[PTW.scala:802:19] .io_ptw_0_gstatus_sbe (_ptw_io_requestor_0_gstatus_sbe), // @[PTW.scala:802:19] .io_ptw_0_gstatus_sxl (_ptw_io_requestor_0_gstatus_sxl), // @[PTW.scala:802:19] .io_ptw_0_gstatus_zero1 (_ptw_io_requestor_0_gstatus_zero1), // @[PTW.scala:802:19] .io_ptw_0_gstatus_tsr (_ptw_io_requestor_0_gstatus_tsr), // @[PTW.scala:802:19] .io_ptw_0_gstatus_tw (_ptw_io_requestor_0_gstatus_tw), // @[PTW.scala:802:19] .io_ptw_0_gstatus_tvm (_ptw_io_requestor_0_gstatus_tvm), // @[PTW.scala:802:19] .io_ptw_0_gstatus_mxr (_ptw_io_requestor_0_gstatus_mxr), // @[PTW.scala:802:19] .io_ptw_0_gstatus_sum (_ptw_io_requestor_0_gstatus_sum), // @[PTW.scala:802:19] .io_ptw_0_gstatus_mprv (_ptw_io_requestor_0_gstatus_mprv), // @[PTW.scala:802:19] .io_ptw_0_gstatus_fs (_ptw_io_requestor_0_gstatus_fs), // @[PTW.scala:802:19] .io_ptw_0_gstatus_mpp (_ptw_io_requestor_0_gstatus_mpp), // @[PTW.scala:802:19] .io_ptw_0_gstatus_vs (_ptw_io_requestor_0_gstatus_vs), // @[PTW.scala:802:19] .io_ptw_0_gstatus_spp (_ptw_io_requestor_0_gstatus_spp), // @[PTW.scala:802:19] .io_ptw_0_gstatus_mpie (_ptw_io_requestor_0_gstatus_mpie), // @[PTW.scala:802:19] .io_ptw_0_gstatus_ube (_ptw_io_requestor_0_gstatus_ube), // @[PTW.scala:802:19] .io_ptw_0_gstatus_spie (_ptw_io_requestor_0_gstatus_spie), // @[PTW.scala:802:19] .io_ptw_0_gstatus_upie (_ptw_io_requestor_0_gstatus_upie), // @[PTW.scala:802:19] .io_ptw_0_gstatus_mie (_ptw_io_requestor_0_gstatus_mie), // @[PTW.scala:802:19] .io_ptw_0_gstatus_hie (_ptw_io_requestor_0_gstatus_hie), // @[PTW.scala:802:19] .io_ptw_0_gstatus_sie (_ptw_io_requestor_0_gstatus_sie), // @[PTW.scala:802:19] .io_ptw_0_gstatus_uie (_ptw_io_requestor_0_gstatus_uie), // @[PTW.scala:802:19] .io_ptw_0_pmp_0_cfg_l (_ptw_io_requestor_0_pmp_0_cfg_l), // @[PTW.scala:802:19] .io_ptw_0_pmp_0_cfg_a (_ptw_io_requestor_0_pmp_0_cfg_a), // @[PTW.scala:802:19] .io_ptw_0_pmp_0_cfg_x (_ptw_io_requestor_0_pmp_0_cfg_x), // @[PTW.scala:802:19] .io_ptw_0_pmp_0_cfg_w (_ptw_io_requestor_0_pmp_0_cfg_w), // @[PTW.scala:802:19] .io_ptw_0_pmp_0_cfg_r (_ptw_io_requestor_0_pmp_0_cfg_r), // @[PTW.scala:802:19] .io_ptw_0_pmp_0_addr (_ptw_io_requestor_0_pmp_0_addr), // @[PTW.scala:802:19] .io_ptw_0_pmp_0_mask (_ptw_io_requestor_0_pmp_0_mask), // @[PTW.scala:802:19] .io_ptw_0_pmp_1_cfg_l (_ptw_io_requestor_0_pmp_1_cfg_l), // @[PTW.scala:802:19] .io_ptw_0_pmp_1_cfg_a (_ptw_io_requestor_0_pmp_1_cfg_a), // @[PTW.scala:802:19] .io_ptw_0_pmp_1_cfg_x (_ptw_io_requestor_0_pmp_1_cfg_x), // @[PTW.scala:802:19] .io_ptw_0_pmp_1_cfg_w 
(_ptw_io_requestor_0_pmp_1_cfg_w), // @[PTW.scala:802:19] .io_ptw_0_pmp_1_cfg_r (_ptw_io_requestor_0_pmp_1_cfg_r), // @[PTW.scala:802:19] .io_ptw_0_pmp_1_addr (_ptw_io_requestor_0_pmp_1_addr), // @[PTW.scala:802:19] .io_ptw_0_pmp_1_mask (_ptw_io_requestor_0_pmp_1_mask), // @[PTW.scala:802:19] .io_ptw_0_pmp_2_cfg_l (_ptw_io_requestor_0_pmp_2_cfg_l), // @[PTW.scala:802:19] .io_ptw_0_pmp_2_cfg_a (_ptw_io_requestor_0_pmp_2_cfg_a), // @[PTW.scala:802:19] .io_ptw_0_pmp_2_cfg_x (_ptw_io_requestor_0_pmp_2_cfg_x), // @[PTW.scala:802:19] .io_ptw_0_pmp_2_cfg_w (_ptw_io_requestor_0_pmp_2_cfg_w), // @[PTW.scala:802:19] .io_ptw_0_pmp_2_cfg_r (_ptw_io_requestor_0_pmp_2_cfg_r), // @[PTW.scala:802:19] .io_ptw_0_pmp_2_addr (_ptw_io_requestor_0_pmp_2_addr), // @[PTW.scala:802:19] .io_ptw_0_pmp_2_mask (_ptw_io_requestor_0_pmp_2_mask), // @[PTW.scala:802:19] .io_ptw_0_pmp_3_cfg_l (_ptw_io_requestor_0_pmp_3_cfg_l), // @[PTW.scala:802:19] .io_ptw_0_pmp_3_cfg_a (_ptw_io_requestor_0_pmp_3_cfg_a), // @[PTW.scala:802:19] .io_ptw_0_pmp_3_cfg_x (_ptw_io_requestor_0_pmp_3_cfg_x), // @[PTW.scala:802:19] .io_ptw_0_pmp_3_cfg_w (_ptw_io_requestor_0_pmp_3_cfg_w), // @[PTW.scala:802:19] .io_ptw_0_pmp_3_cfg_r (_ptw_io_requestor_0_pmp_3_cfg_r), // @[PTW.scala:802:19] .io_ptw_0_pmp_3_addr (_ptw_io_requestor_0_pmp_3_addr), // @[PTW.scala:802:19] .io_ptw_0_pmp_3_mask (_ptw_io_requestor_0_pmp_3_mask), // @[PTW.scala:802:19] .io_ptw_0_pmp_4_cfg_l (_ptw_io_requestor_0_pmp_4_cfg_l), // @[PTW.scala:802:19] .io_ptw_0_pmp_4_cfg_a (_ptw_io_requestor_0_pmp_4_cfg_a), // @[PTW.scala:802:19] .io_ptw_0_pmp_4_cfg_x (_ptw_io_requestor_0_pmp_4_cfg_x), // @[PTW.scala:802:19] .io_ptw_0_pmp_4_cfg_w (_ptw_io_requestor_0_pmp_4_cfg_w), // @[PTW.scala:802:19] .io_ptw_0_pmp_4_cfg_r (_ptw_io_requestor_0_pmp_4_cfg_r), // @[PTW.scala:802:19] .io_ptw_0_pmp_4_addr (_ptw_io_requestor_0_pmp_4_addr), // @[PTW.scala:802:19] .io_ptw_0_pmp_4_mask (_ptw_io_requestor_0_pmp_4_mask), // @[PTW.scala:802:19] .io_ptw_0_pmp_5_cfg_l (_ptw_io_requestor_0_pmp_5_cfg_l), // @[PTW.scala:802:19] .io_ptw_0_pmp_5_cfg_a (_ptw_io_requestor_0_pmp_5_cfg_a), // @[PTW.scala:802:19] .io_ptw_0_pmp_5_cfg_x (_ptw_io_requestor_0_pmp_5_cfg_x), // @[PTW.scala:802:19] .io_ptw_0_pmp_5_cfg_w (_ptw_io_requestor_0_pmp_5_cfg_w), // @[PTW.scala:802:19] .io_ptw_0_pmp_5_cfg_r (_ptw_io_requestor_0_pmp_5_cfg_r), // @[PTW.scala:802:19] .io_ptw_0_pmp_5_addr (_ptw_io_requestor_0_pmp_5_addr), // @[PTW.scala:802:19] .io_ptw_0_pmp_5_mask (_ptw_io_requestor_0_pmp_5_mask), // @[PTW.scala:802:19] .io_ptw_0_pmp_6_cfg_l (_ptw_io_requestor_0_pmp_6_cfg_l), // @[PTW.scala:802:19] .io_ptw_0_pmp_6_cfg_a (_ptw_io_requestor_0_pmp_6_cfg_a), // @[PTW.scala:802:19] .io_ptw_0_pmp_6_cfg_x (_ptw_io_requestor_0_pmp_6_cfg_x), // @[PTW.scala:802:19] .io_ptw_0_pmp_6_cfg_w (_ptw_io_requestor_0_pmp_6_cfg_w), // @[PTW.scala:802:19] .io_ptw_0_pmp_6_cfg_r (_ptw_io_requestor_0_pmp_6_cfg_r), // @[PTW.scala:802:19] .io_ptw_0_pmp_6_addr (_ptw_io_requestor_0_pmp_6_addr), // @[PTW.scala:802:19] .io_ptw_0_pmp_6_mask (_ptw_io_requestor_0_pmp_6_mask), // @[PTW.scala:802:19] .io_ptw_0_pmp_7_cfg_l (_ptw_io_requestor_0_pmp_7_cfg_l), // @[PTW.scala:802:19] .io_ptw_0_pmp_7_cfg_a (_ptw_io_requestor_0_pmp_7_cfg_a), // @[PTW.scala:802:19] .io_ptw_0_pmp_7_cfg_x (_ptw_io_requestor_0_pmp_7_cfg_x), // @[PTW.scala:802:19] .io_ptw_0_pmp_7_cfg_w (_ptw_io_requestor_0_pmp_7_cfg_w), // @[PTW.scala:802:19] .io_ptw_0_pmp_7_cfg_r (_ptw_io_requestor_0_pmp_7_cfg_r), // @[PTW.scala:802:19] .io_ptw_0_pmp_7_addr (_ptw_io_requestor_0_pmp_7_addr), // @[PTW.scala:802:19] 
.io_ptw_0_pmp_7_mask (_ptw_io_requestor_0_pmp_7_mask), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_0_ren (_ptw_io_requestor_0_customCSRs_csrs_0_ren), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_0_wen (_ptw_io_requestor_0_customCSRs_csrs_0_wen), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_0_wdata (_ptw_io_requestor_0_customCSRs_csrs_0_wdata), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_0_value (_ptw_io_requestor_0_customCSRs_csrs_0_value), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_1_ren (_ptw_io_requestor_0_customCSRs_csrs_1_ren), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_1_wen (_ptw_io_requestor_0_customCSRs_csrs_1_wen), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_1_wdata (_ptw_io_requestor_0_customCSRs_csrs_1_wdata), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_1_value (_ptw_io_requestor_0_customCSRs_csrs_1_value), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_2_ren (_ptw_io_requestor_0_customCSRs_csrs_2_ren), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_2_wen (_ptw_io_requestor_0_customCSRs_csrs_2_wen), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_2_wdata (_ptw_io_requestor_0_customCSRs_csrs_2_wdata), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_2_value (_ptw_io_requestor_0_customCSRs_csrs_2_value), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_3_ren (_ptw_io_requestor_0_customCSRs_csrs_3_ren), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_3_wen (_ptw_io_requestor_0_customCSRs_csrs_3_wen), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_3_wdata (_ptw_io_requestor_0_customCSRs_csrs_3_wdata), // @[PTW.scala:802:19] .io_ptw_0_customCSRs_csrs_3_value (_ptw_io_requestor_0_customCSRs_csrs_3_value), // @[PTW.scala:802:19] .io_ptw_1_req_ready (_ptw_io_requestor_1_req_ready), // @[PTW.scala:802:19] .io_ptw_1_resp_valid (_ptw_io_requestor_1_resp_valid), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_ae_ptw (_ptw_io_requestor_1_resp_bits_ae_ptw), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_ae_final (_ptw_io_requestor_1_resp_bits_ae_final), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_pf (_ptw_io_requestor_1_resp_bits_pf), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_gf (_ptw_io_requestor_1_resp_bits_gf), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_hr (_ptw_io_requestor_1_resp_bits_hr), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_hw (_ptw_io_requestor_1_resp_bits_hw), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_hx (_ptw_io_requestor_1_resp_bits_hx), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_pte_reserved_for_future (_ptw_io_requestor_1_resp_bits_pte_reserved_for_future), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_pte_ppn (_ptw_io_requestor_1_resp_bits_pte_ppn), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_pte_reserved_for_software (_ptw_io_requestor_1_resp_bits_pte_reserved_for_software), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_pte_d (_ptw_io_requestor_1_resp_bits_pte_d), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_pte_a (_ptw_io_requestor_1_resp_bits_pte_a), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_pte_g (_ptw_io_requestor_1_resp_bits_pte_g), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_pte_u (_ptw_io_requestor_1_resp_bits_pte_u), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_pte_x (_ptw_io_requestor_1_resp_bits_pte_x), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_pte_w (_ptw_io_requestor_1_resp_bits_pte_w), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_pte_r (_ptw_io_requestor_1_resp_bits_pte_r), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_pte_v (_ptw_io_requestor_1_resp_bits_pte_v), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_level 
(_ptw_io_requestor_1_resp_bits_level), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_homogeneous (_ptw_io_requestor_1_resp_bits_homogeneous), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_gpa_valid (_ptw_io_requestor_1_resp_bits_gpa_valid), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_gpa_bits (_ptw_io_requestor_1_resp_bits_gpa_bits), // @[PTW.scala:802:19] .io_ptw_1_resp_bits_gpa_is_pte (_ptw_io_requestor_1_resp_bits_gpa_is_pte), // @[PTW.scala:802:19] .io_ptw_1_ptbr_mode (_ptw_io_requestor_1_ptbr_mode), // @[PTW.scala:802:19] .io_ptw_1_ptbr_ppn (_ptw_io_requestor_1_ptbr_ppn), // @[PTW.scala:802:19] .io_ptw_1_status_debug (_ptw_io_requestor_1_status_debug), // @[PTW.scala:802:19] .io_ptw_1_status_cease (_ptw_io_requestor_1_status_cease), // @[PTW.scala:802:19] .io_ptw_1_status_wfi (_ptw_io_requestor_1_status_wfi), // @[PTW.scala:802:19] .io_ptw_1_status_isa (_ptw_io_requestor_1_status_isa), // @[PTW.scala:802:19] .io_ptw_1_status_dprv (_ptw_io_requestor_1_status_dprv), // @[PTW.scala:802:19] .io_ptw_1_status_dv (_ptw_io_requestor_1_status_dv), // @[PTW.scala:802:19] .io_ptw_1_status_prv (_ptw_io_requestor_1_status_prv), // @[PTW.scala:802:19] .io_ptw_1_status_v (_ptw_io_requestor_1_status_v), // @[PTW.scala:802:19] .io_ptw_1_status_mpv (_ptw_io_requestor_1_status_mpv), // @[PTW.scala:802:19] .io_ptw_1_status_gva (_ptw_io_requestor_1_status_gva), // @[PTW.scala:802:19] .io_ptw_1_status_tsr (_ptw_io_requestor_1_status_tsr), // @[PTW.scala:802:19] .io_ptw_1_status_tw (_ptw_io_requestor_1_status_tw), // @[PTW.scala:802:19] .io_ptw_1_status_tvm (_ptw_io_requestor_1_status_tvm), // @[PTW.scala:802:19] .io_ptw_1_status_mxr (_ptw_io_requestor_1_status_mxr), // @[PTW.scala:802:19] .io_ptw_1_status_sum (_ptw_io_requestor_1_status_sum), // @[PTW.scala:802:19] .io_ptw_1_status_mprv (_ptw_io_requestor_1_status_mprv), // @[PTW.scala:802:19] .io_ptw_1_status_fs (_ptw_io_requestor_1_status_fs), // @[PTW.scala:802:19] .io_ptw_1_status_mpp (_ptw_io_requestor_1_status_mpp), // @[PTW.scala:802:19] .io_ptw_1_status_spp (_ptw_io_requestor_1_status_spp), // @[PTW.scala:802:19] .io_ptw_1_status_mpie (_ptw_io_requestor_1_status_mpie), // @[PTW.scala:802:19] .io_ptw_1_status_spie (_ptw_io_requestor_1_status_spie), // @[PTW.scala:802:19] .io_ptw_1_status_mie (_ptw_io_requestor_1_status_mie), // @[PTW.scala:802:19] .io_ptw_1_status_sie (_ptw_io_requestor_1_status_sie), // @[PTW.scala:802:19] .io_ptw_1_hstatus_spvp (_ptw_io_requestor_1_hstatus_spvp), // @[PTW.scala:802:19] .io_ptw_1_hstatus_spv (_ptw_io_requestor_1_hstatus_spv), // @[PTW.scala:802:19] .io_ptw_1_hstatus_gva (_ptw_io_requestor_1_hstatus_gva), // @[PTW.scala:802:19] .io_ptw_1_gstatus_debug (_ptw_io_requestor_1_gstatus_debug), // @[PTW.scala:802:19] .io_ptw_1_gstatus_cease (_ptw_io_requestor_1_gstatus_cease), // @[PTW.scala:802:19] .io_ptw_1_gstatus_wfi (_ptw_io_requestor_1_gstatus_wfi), // @[PTW.scala:802:19] .io_ptw_1_gstatus_isa (_ptw_io_requestor_1_gstatus_isa), // @[PTW.scala:802:19] .io_ptw_1_gstatus_dprv (_ptw_io_requestor_1_gstatus_dprv), // @[PTW.scala:802:19] .io_ptw_1_gstatus_dv (_ptw_io_requestor_1_gstatus_dv), // @[PTW.scala:802:19] .io_ptw_1_gstatus_prv (_ptw_io_requestor_1_gstatus_prv), // @[PTW.scala:802:19] .io_ptw_1_gstatus_v (_ptw_io_requestor_1_gstatus_v), // @[PTW.scala:802:19] .io_ptw_1_gstatus_zero2 (_ptw_io_requestor_1_gstatus_zero2), // @[PTW.scala:802:19] .io_ptw_1_gstatus_mpv (_ptw_io_requestor_1_gstatus_mpv), // @[PTW.scala:802:19] .io_ptw_1_gstatus_gva (_ptw_io_requestor_1_gstatus_gva), // @[PTW.scala:802:19] .io_ptw_1_gstatus_mbe 
(_ptw_io_requestor_1_gstatus_mbe), // @[PTW.scala:802:19] .io_ptw_1_gstatus_sbe (_ptw_io_requestor_1_gstatus_sbe), // @[PTW.scala:802:19] .io_ptw_1_gstatus_sxl (_ptw_io_requestor_1_gstatus_sxl), // @[PTW.scala:802:19] .io_ptw_1_gstatus_zero1 (_ptw_io_requestor_1_gstatus_zero1), // @[PTW.scala:802:19] .io_ptw_1_gstatus_tsr (_ptw_io_requestor_1_gstatus_tsr), // @[PTW.scala:802:19] .io_ptw_1_gstatus_tw (_ptw_io_requestor_1_gstatus_tw), // @[PTW.scala:802:19] .io_ptw_1_gstatus_tvm (_ptw_io_requestor_1_gstatus_tvm), // @[PTW.scala:802:19] .io_ptw_1_gstatus_mxr (_ptw_io_requestor_1_gstatus_mxr), // @[PTW.scala:802:19] .io_ptw_1_gstatus_sum (_ptw_io_requestor_1_gstatus_sum), // @[PTW.scala:802:19] .io_ptw_1_gstatus_mprv (_ptw_io_requestor_1_gstatus_mprv), // @[PTW.scala:802:19] .io_ptw_1_gstatus_fs (_ptw_io_requestor_1_gstatus_fs), // @[PTW.scala:802:19] .io_ptw_1_gstatus_mpp (_ptw_io_requestor_1_gstatus_mpp), // @[PTW.scala:802:19] .io_ptw_1_gstatus_vs (_ptw_io_requestor_1_gstatus_vs), // @[PTW.scala:802:19] .io_ptw_1_gstatus_spp (_ptw_io_requestor_1_gstatus_spp), // @[PTW.scala:802:19] .io_ptw_1_gstatus_mpie (_ptw_io_requestor_1_gstatus_mpie), // @[PTW.scala:802:19] .io_ptw_1_gstatus_ube (_ptw_io_requestor_1_gstatus_ube), // @[PTW.scala:802:19] .io_ptw_1_gstatus_spie (_ptw_io_requestor_1_gstatus_spie), // @[PTW.scala:802:19] .io_ptw_1_gstatus_upie (_ptw_io_requestor_1_gstatus_upie), // @[PTW.scala:802:19] .io_ptw_1_gstatus_mie (_ptw_io_requestor_1_gstatus_mie), // @[PTW.scala:802:19] .io_ptw_1_gstatus_hie (_ptw_io_requestor_1_gstatus_hie), // @[PTW.scala:802:19] .io_ptw_1_gstatus_sie (_ptw_io_requestor_1_gstatus_sie), // @[PTW.scala:802:19] .io_ptw_1_gstatus_uie (_ptw_io_requestor_1_gstatus_uie), // @[PTW.scala:802:19] .io_ptw_1_pmp_0_cfg_l (_ptw_io_requestor_1_pmp_0_cfg_l), // @[PTW.scala:802:19] .io_ptw_1_pmp_0_cfg_a (_ptw_io_requestor_1_pmp_0_cfg_a), // @[PTW.scala:802:19] .io_ptw_1_pmp_0_cfg_x (_ptw_io_requestor_1_pmp_0_cfg_x), // @[PTW.scala:802:19] .io_ptw_1_pmp_0_cfg_w (_ptw_io_requestor_1_pmp_0_cfg_w), // @[PTW.scala:802:19] .io_ptw_1_pmp_0_cfg_r (_ptw_io_requestor_1_pmp_0_cfg_r), // @[PTW.scala:802:19] .io_ptw_1_pmp_0_addr (_ptw_io_requestor_1_pmp_0_addr), // @[PTW.scala:802:19] .io_ptw_1_pmp_0_mask (_ptw_io_requestor_1_pmp_0_mask), // @[PTW.scala:802:19] .io_ptw_1_pmp_1_cfg_l (_ptw_io_requestor_1_pmp_1_cfg_l), // @[PTW.scala:802:19] .io_ptw_1_pmp_1_cfg_a (_ptw_io_requestor_1_pmp_1_cfg_a), // @[PTW.scala:802:19] .io_ptw_1_pmp_1_cfg_x (_ptw_io_requestor_1_pmp_1_cfg_x), // @[PTW.scala:802:19] .io_ptw_1_pmp_1_cfg_w (_ptw_io_requestor_1_pmp_1_cfg_w), // @[PTW.scala:802:19] .io_ptw_1_pmp_1_cfg_r (_ptw_io_requestor_1_pmp_1_cfg_r), // @[PTW.scala:802:19] .io_ptw_1_pmp_1_addr (_ptw_io_requestor_1_pmp_1_addr), // @[PTW.scala:802:19] .io_ptw_1_pmp_1_mask (_ptw_io_requestor_1_pmp_1_mask), // @[PTW.scala:802:19] .io_ptw_1_pmp_2_cfg_l (_ptw_io_requestor_1_pmp_2_cfg_l), // @[PTW.scala:802:19] .io_ptw_1_pmp_2_cfg_a (_ptw_io_requestor_1_pmp_2_cfg_a), // @[PTW.scala:802:19] .io_ptw_1_pmp_2_cfg_x (_ptw_io_requestor_1_pmp_2_cfg_x), // @[PTW.scala:802:19] .io_ptw_1_pmp_2_cfg_w (_ptw_io_requestor_1_pmp_2_cfg_w), // @[PTW.scala:802:19] .io_ptw_1_pmp_2_cfg_r (_ptw_io_requestor_1_pmp_2_cfg_r), // @[PTW.scala:802:19] .io_ptw_1_pmp_2_addr (_ptw_io_requestor_1_pmp_2_addr), // @[PTW.scala:802:19] .io_ptw_1_pmp_2_mask (_ptw_io_requestor_1_pmp_2_mask), // @[PTW.scala:802:19] .io_ptw_1_pmp_3_cfg_l (_ptw_io_requestor_1_pmp_3_cfg_l), // @[PTW.scala:802:19] .io_ptw_1_pmp_3_cfg_a (_ptw_io_requestor_1_pmp_3_cfg_a), // 
@[PTW.scala:802:19] .io_ptw_1_pmp_3_cfg_x (_ptw_io_requestor_1_pmp_3_cfg_x), // @[PTW.scala:802:19] .io_ptw_1_pmp_3_cfg_w (_ptw_io_requestor_1_pmp_3_cfg_w), // @[PTW.scala:802:19] .io_ptw_1_pmp_3_cfg_r (_ptw_io_requestor_1_pmp_3_cfg_r), // @[PTW.scala:802:19] .io_ptw_1_pmp_3_addr (_ptw_io_requestor_1_pmp_3_addr), // @[PTW.scala:802:19] .io_ptw_1_pmp_3_mask (_ptw_io_requestor_1_pmp_3_mask), // @[PTW.scala:802:19] .io_ptw_1_pmp_4_cfg_l (_ptw_io_requestor_1_pmp_4_cfg_l), // @[PTW.scala:802:19] .io_ptw_1_pmp_4_cfg_a (_ptw_io_requestor_1_pmp_4_cfg_a), // @[PTW.scala:802:19] .io_ptw_1_pmp_4_cfg_x (_ptw_io_requestor_1_pmp_4_cfg_x), // @[PTW.scala:802:19] .io_ptw_1_pmp_4_cfg_w (_ptw_io_requestor_1_pmp_4_cfg_w), // @[PTW.scala:802:19] .io_ptw_1_pmp_4_cfg_r (_ptw_io_requestor_1_pmp_4_cfg_r), // @[PTW.scala:802:19] .io_ptw_1_pmp_4_addr (_ptw_io_requestor_1_pmp_4_addr), // @[PTW.scala:802:19] .io_ptw_1_pmp_4_mask (_ptw_io_requestor_1_pmp_4_mask), // @[PTW.scala:802:19] .io_ptw_1_pmp_5_cfg_l (_ptw_io_requestor_1_pmp_5_cfg_l), // @[PTW.scala:802:19] .io_ptw_1_pmp_5_cfg_a (_ptw_io_requestor_1_pmp_5_cfg_a), // @[PTW.scala:802:19] .io_ptw_1_pmp_5_cfg_x (_ptw_io_requestor_1_pmp_5_cfg_x), // @[PTW.scala:802:19] .io_ptw_1_pmp_5_cfg_w (_ptw_io_requestor_1_pmp_5_cfg_w), // @[PTW.scala:802:19] .io_ptw_1_pmp_5_cfg_r (_ptw_io_requestor_1_pmp_5_cfg_r), // @[PTW.scala:802:19] .io_ptw_1_pmp_5_addr (_ptw_io_requestor_1_pmp_5_addr), // @[PTW.scala:802:19] .io_ptw_1_pmp_5_mask (_ptw_io_requestor_1_pmp_5_mask), // @[PTW.scala:802:19] .io_ptw_1_pmp_6_cfg_l (_ptw_io_requestor_1_pmp_6_cfg_l), // @[PTW.scala:802:19] .io_ptw_1_pmp_6_cfg_a (_ptw_io_requestor_1_pmp_6_cfg_a), // @[PTW.scala:802:19] .io_ptw_1_pmp_6_cfg_x (_ptw_io_requestor_1_pmp_6_cfg_x), // @[PTW.scala:802:19] .io_ptw_1_pmp_6_cfg_w (_ptw_io_requestor_1_pmp_6_cfg_w), // @[PTW.scala:802:19] .io_ptw_1_pmp_6_cfg_r (_ptw_io_requestor_1_pmp_6_cfg_r), // @[PTW.scala:802:19] .io_ptw_1_pmp_6_addr (_ptw_io_requestor_1_pmp_6_addr), // @[PTW.scala:802:19] .io_ptw_1_pmp_6_mask (_ptw_io_requestor_1_pmp_6_mask), // @[PTW.scala:802:19] .io_ptw_1_pmp_7_cfg_l (_ptw_io_requestor_1_pmp_7_cfg_l), // @[PTW.scala:802:19] .io_ptw_1_pmp_7_cfg_a (_ptw_io_requestor_1_pmp_7_cfg_a), // @[PTW.scala:802:19] .io_ptw_1_pmp_7_cfg_x (_ptw_io_requestor_1_pmp_7_cfg_x), // @[PTW.scala:802:19] .io_ptw_1_pmp_7_cfg_w (_ptw_io_requestor_1_pmp_7_cfg_w), // @[PTW.scala:802:19] .io_ptw_1_pmp_7_cfg_r (_ptw_io_requestor_1_pmp_7_cfg_r), // @[PTW.scala:802:19] .io_ptw_1_pmp_7_addr (_ptw_io_requestor_1_pmp_7_addr), // @[PTW.scala:802:19] .io_ptw_1_pmp_7_mask (_ptw_io_requestor_1_pmp_7_mask), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_0_ren (_ptw_io_requestor_1_customCSRs_csrs_0_ren), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_0_wen (_ptw_io_requestor_1_customCSRs_csrs_0_wen), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_0_wdata (_ptw_io_requestor_1_customCSRs_csrs_0_wdata), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_0_value (_ptw_io_requestor_1_customCSRs_csrs_0_value), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_1_ren (_ptw_io_requestor_1_customCSRs_csrs_1_ren), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_1_wen (_ptw_io_requestor_1_customCSRs_csrs_1_wen), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_1_wdata (_ptw_io_requestor_1_customCSRs_csrs_1_wdata), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_1_value (_ptw_io_requestor_1_customCSRs_csrs_1_value), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_2_ren (_ptw_io_requestor_1_customCSRs_csrs_2_ren), // 
@[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_2_wen (_ptw_io_requestor_1_customCSRs_csrs_2_wen), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_2_wdata (_ptw_io_requestor_1_customCSRs_csrs_2_wdata), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_2_value (_ptw_io_requestor_1_customCSRs_csrs_2_value), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_3_ren (_ptw_io_requestor_1_customCSRs_csrs_3_ren), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_3_wen (_ptw_io_requestor_1_customCSRs_csrs_3_wen), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_3_wdata (_ptw_io_requestor_1_customCSRs_csrs_3_wdata), // @[PTW.scala:802:19] .io_ptw_1_customCSRs_csrs_3_value (_ptw_io_requestor_1_customCSRs_csrs_3_value) // @[PTW.scala:802:19] ); // @[Configs.scala:14:35] Frontend frontend ( // @[Frontend.scala:393:28] .clock (clock), .reset (reset), .auto_icache_master_out_a_ready (widget_1_auto_anon_in_a_ready), // @[WidthWidget.scala:27:9] .auto_icache_master_out_a_valid (widget_1_auto_anon_in_a_valid), .auto_icache_master_out_a_bits_address (widget_1_auto_anon_in_a_bits_address), .auto_icache_master_out_d_valid (widget_1_auto_anon_in_d_valid), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_opcode (widget_1_auto_anon_in_d_bits_opcode), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_param (widget_1_auto_anon_in_d_bits_param), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_size (widget_1_auto_anon_in_d_bits_size), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_sink (widget_1_auto_anon_in_d_bits_sink), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_denied (widget_1_auto_anon_in_d_bits_denied), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_data (widget_1_auto_anon_in_d_bits_data), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_corrupt (widget_1_auto_anon_in_d_bits_corrupt), // @[WidthWidget.scala:27:9] .io_cpu_might_request (_core_io_imem_might_request), // @[RocketTile.scala:147:20] .io_cpu_req_valid (_core_io_imem_req_valid), // @[RocketTile.scala:147:20] .io_cpu_req_bits_pc (_core_io_imem_req_bits_pc), // @[RocketTile.scala:147:20] .io_cpu_req_bits_speculative (_core_io_imem_req_bits_speculative), // @[RocketTile.scala:147:20] .io_cpu_sfence_valid (_core_io_imem_sfence_valid), // @[RocketTile.scala:147:20] .io_cpu_sfence_bits_rs1 (_core_io_imem_sfence_bits_rs1), // @[RocketTile.scala:147:20] .io_cpu_sfence_bits_rs2 (_core_io_imem_sfence_bits_rs2), // @[RocketTile.scala:147:20] .io_cpu_sfence_bits_addr (_core_io_imem_sfence_bits_addr), // @[RocketTile.scala:147:20] .io_cpu_sfence_bits_asid (_core_io_imem_sfence_bits_asid), // @[RocketTile.scala:147:20] .io_cpu_sfence_bits_hv (_core_io_imem_sfence_bits_hv), // @[RocketTile.scala:147:20] .io_cpu_sfence_bits_hg (_core_io_imem_sfence_bits_hg), // @[RocketTile.scala:147:20] .io_cpu_resp_ready (_core_io_imem_resp_ready), // @[RocketTile.scala:147:20] .io_cpu_resp_valid (_frontend_io_cpu_resp_valid), .io_cpu_resp_bits_btb_cfiType (_frontend_io_cpu_resp_bits_btb_cfiType), .io_cpu_resp_bits_btb_taken (_frontend_io_cpu_resp_bits_btb_taken), .io_cpu_resp_bits_btb_mask (_frontend_io_cpu_resp_bits_btb_mask), .io_cpu_resp_bits_btb_bridx (_frontend_io_cpu_resp_bits_btb_bridx), .io_cpu_resp_bits_btb_target (_frontend_io_cpu_resp_bits_btb_target), .io_cpu_resp_bits_btb_entry (_frontend_io_cpu_resp_bits_btb_entry), .io_cpu_resp_bits_btb_bht_history (_frontend_io_cpu_resp_bits_btb_bht_history), .io_cpu_resp_bits_btb_bht_value (_frontend_io_cpu_resp_bits_btb_bht_value), 
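// Frontend io_cpu_* ports continue below (fetch responses to the core, BTB/BHT updates from it), followed by io_ptw_*, which wires the frontend TLB to ptw requestor 3.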
.io_cpu_resp_bits_pc (_frontend_io_cpu_resp_bits_pc), .io_cpu_resp_bits_data (_frontend_io_cpu_resp_bits_data), .io_cpu_resp_bits_mask (_frontend_io_cpu_resp_bits_mask), .io_cpu_resp_bits_xcpt_pf_inst (_frontend_io_cpu_resp_bits_xcpt_pf_inst), .io_cpu_resp_bits_xcpt_gf_inst (_frontend_io_cpu_resp_bits_xcpt_gf_inst), .io_cpu_resp_bits_xcpt_ae_inst (_frontend_io_cpu_resp_bits_xcpt_ae_inst), .io_cpu_resp_bits_replay (_frontend_io_cpu_resp_bits_replay), .io_cpu_gpa_valid (_frontend_io_cpu_gpa_valid), .io_cpu_gpa_bits (_frontend_io_cpu_gpa_bits), .io_cpu_gpa_is_pte (_frontend_io_cpu_gpa_is_pte), .io_cpu_btb_update_valid (_core_io_imem_btb_update_valid), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_cfiType (_core_io_imem_btb_update_bits_prediction_cfiType), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_taken (_core_io_imem_btb_update_bits_prediction_taken), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_mask (_core_io_imem_btb_update_bits_prediction_mask), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_bridx (_core_io_imem_btb_update_bits_prediction_bridx), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_target (_core_io_imem_btb_update_bits_prediction_target), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_entry (_core_io_imem_btb_update_bits_prediction_entry), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_bht_history (_core_io_imem_btb_update_bits_prediction_bht_history), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_bht_value (_core_io_imem_btb_update_bits_prediction_bht_value), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_pc (_core_io_imem_btb_update_bits_pc), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_target (_core_io_imem_btb_update_bits_target), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_isValid (_core_io_imem_btb_update_bits_isValid), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_br_pc (_core_io_imem_btb_update_bits_br_pc), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_cfiType (_core_io_imem_btb_update_bits_cfiType), // @[RocketTile.scala:147:20] .io_cpu_bht_update_valid (_core_io_imem_bht_update_valid), // @[RocketTile.scala:147:20] .io_cpu_bht_update_bits_prediction_history (_core_io_imem_bht_update_bits_prediction_history), // @[RocketTile.scala:147:20] .io_cpu_bht_update_bits_prediction_value (_core_io_imem_bht_update_bits_prediction_value), // @[RocketTile.scala:147:20] .io_cpu_bht_update_bits_pc (_core_io_imem_bht_update_bits_pc), // @[RocketTile.scala:147:20] .io_cpu_bht_update_bits_branch (_core_io_imem_bht_update_bits_branch), // @[RocketTile.scala:147:20] .io_cpu_bht_update_bits_taken (_core_io_imem_bht_update_bits_taken), // @[RocketTile.scala:147:20] .io_cpu_bht_update_bits_mispredict (_core_io_imem_bht_update_bits_mispredict), // @[RocketTile.scala:147:20] .io_cpu_flush_icache (_core_io_imem_flush_icache), // @[RocketTile.scala:147:20] .io_cpu_npc (_frontend_io_cpu_npc), .io_cpu_perf_acquire (_frontend_io_cpu_perf_acquire), .io_cpu_perf_tlbMiss (_frontend_io_cpu_perf_tlbMiss), .io_cpu_progress (_core_io_imem_progress), // @[RocketTile.scala:147:20] .io_ptw_req_ready (_ptw_io_requestor_3_req_ready), // @[PTW.scala:802:19] .io_ptw_req_valid (_frontend_io_ptw_req_valid), .io_ptw_req_bits_valid (_frontend_io_ptw_req_bits_valid), .io_ptw_req_bits_bits_addr (_frontend_io_ptw_req_bits_bits_addr), .io_ptw_req_bits_bits_need_gpa (_frontend_io_ptw_req_bits_bits_need_gpa), .io_ptw_resp_valid 
(_ptw_io_requestor_3_resp_valid), // @[PTW.scala:802:19] .io_ptw_resp_bits_ae_ptw (_ptw_io_requestor_3_resp_bits_ae_ptw), // @[PTW.scala:802:19] .io_ptw_resp_bits_ae_final (_ptw_io_requestor_3_resp_bits_ae_final), // @[PTW.scala:802:19] .io_ptw_resp_bits_pf (_ptw_io_requestor_3_resp_bits_pf), // @[PTW.scala:802:19] .io_ptw_resp_bits_gf (_ptw_io_requestor_3_resp_bits_gf), // @[PTW.scala:802:19] .io_ptw_resp_bits_hr (_ptw_io_requestor_3_resp_bits_hr), // @[PTW.scala:802:19] .io_ptw_resp_bits_hw (_ptw_io_requestor_3_resp_bits_hw), // @[PTW.scala:802:19] .io_ptw_resp_bits_hx (_ptw_io_requestor_3_resp_bits_hx), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_reserved_for_future (_ptw_io_requestor_3_resp_bits_pte_reserved_for_future), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_ppn (_ptw_io_requestor_3_resp_bits_pte_ppn), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_reserved_for_software (_ptw_io_requestor_3_resp_bits_pte_reserved_for_software), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_d (_ptw_io_requestor_3_resp_bits_pte_d), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_a (_ptw_io_requestor_3_resp_bits_pte_a), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_g (_ptw_io_requestor_3_resp_bits_pte_g), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_u (_ptw_io_requestor_3_resp_bits_pte_u), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_x (_ptw_io_requestor_3_resp_bits_pte_x), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_w (_ptw_io_requestor_3_resp_bits_pte_w), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_r (_ptw_io_requestor_3_resp_bits_pte_r), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_v (_ptw_io_requestor_3_resp_bits_pte_v), // @[PTW.scala:802:19] .io_ptw_resp_bits_level (_ptw_io_requestor_3_resp_bits_level), // @[PTW.scala:802:19] .io_ptw_resp_bits_homogeneous (_ptw_io_requestor_3_resp_bits_homogeneous), // @[PTW.scala:802:19] .io_ptw_resp_bits_gpa_valid (_ptw_io_requestor_3_resp_bits_gpa_valid), // @[PTW.scala:802:19] .io_ptw_resp_bits_gpa_bits (_ptw_io_requestor_3_resp_bits_gpa_bits), // @[PTW.scala:802:19] .io_ptw_resp_bits_gpa_is_pte (_ptw_io_requestor_3_resp_bits_gpa_is_pte), // @[PTW.scala:802:19] .io_ptw_ptbr_mode (_ptw_io_requestor_3_ptbr_mode), // @[PTW.scala:802:19] .io_ptw_ptbr_ppn (_ptw_io_requestor_3_ptbr_ppn), // @[PTW.scala:802:19] .io_ptw_status_debug (_ptw_io_requestor_3_status_debug), // @[PTW.scala:802:19] .io_ptw_status_cease (_ptw_io_requestor_3_status_cease), // @[PTW.scala:802:19] .io_ptw_status_wfi (_ptw_io_requestor_3_status_wfi), // @[PTW.scala:802:19] .io_ptw_status_isa (_ptw_io_requestor_3_status_isa), // @[PTW.scala:802:19] .io_ptw_status_dprv (_ptw_io_requestor_3_status_dprv), // @[PTW.scala:802:19] .io_ptw_status_dv (_ptw_io_requestor_3_status_dv), // @[PTW.scala:802:19] .io_ptw_status_prv (_ptw_io_requestor_3_status_prv), // @[PTW.scala:802:19] .io_ptw_status_v (_ptw_io_requestor_3_status_v), // @[PTW.scala:802:19] .io_ptw_status_mpv (_ptw_io_requestor_3_status_mpv), // @[PTW.scala:802:19] .io_ptw_status_gva (_ptw_io_requestor_3_status_gva), // @[PTW.scala:802:19] .io_ptw_status_tsr (_ptw_io_requestor_3_status_tsr), // @[PTW.scala:802:19] .io_ptw_status_tw (_ptw_io_requestor_3_status_tw), // @[PTW.scala:802:19] .io_ptw_status_tvm (_ptw_io_requestor_3_status_tvm), // @[PTW.scala:802:19] .io_ptw_status_mxr (_ptw_io_requestor_3_status_mxr), // @[PTW.scala:802:19] .io_ptw_status_sum (_ptw_io_requestor_3_status_sum), // @[PTW.scala:802:19] .io_ptw_status_mprv (_ptw_io_requestor_3_status_mprv), // @[PTW.scala:802:19] .io_ptw_status_fs (_ptw_io_requestor_3_status_fs), // 
@[PTW.scala:802:19] .io_ptw_status_mpp (_ptw_io_requestor_3_status_mpp), // @[PTW.scala:802:19] .io_ptw_status_spp (_ptw_io_requestor_3_status_spp), // @[PTW.scala:802:19] .io_ptw_status_mpie (_ptw_io_requestor_3_status_mpie), // @[PTW.scala:802:19] .io_ptw_status_spie (_ptw_io_requestor_3_status_spie), // @[PTW.scala:802:19] .io_ptw_status_mie (_ptw_io_requestor_3_status_mie), // @[PTW.scala:802:19] .io_ptw_status_sie (_ptw_io_requestor_3_status_sie), // @[PTW.scala:802:19] .io_ptw_hstatus_spvp (_ptw_io_requestor_3_hstatus_spvp), // @[PTW.scala:802:19] .io_ptw_hstatus_spv (_ptw_io_requestor_3_hstatus_spv), // @[PTW.scala:802:19] .io_ptw_hstatus_gva (_ptw_io_requestor_3_hstatus_gva), // @[PTW.scala:802:19] .io_ptw_gstatus_debug (_ptw_io_requestor_3_gstatus_debug), // @[PTW.scala:802:19] .io_ptw_gstatus_cease (_ptw_io_requestor_3_gstatus_cease), // @[PTW.scala:802:19] .io_ptw_gstatus_wfi (_ptw_io_requestor_3_gstatus_wfi), // @[PTW.scala:802:19] .io_ptw_gstatus_isa (_ptw_io_requestor_3_gstatus_isa), // @[PTW.scala:802:19] .io_ptw_gstatus_dprv (_ptw_io_requestor_3_gstatus_dprv), // @[PTW.scala:802:19] .io_ptw_gstatus_dv (_ptw_io_requestor_3_gstatus_dv), // @[PTW.scala:802:19] .io_ptw_gstatus_prv (_ptw_io_requestor_3_gstatus_prv), // @[PTW.scala:802:19] .io_ptw_gstatus_v (_ptw_io_requestor_3_gstatus_v), // @[PTW.scala:802:19] .io_ptw_gstatus_zero2 (_ptw_io_requestor_3_gstatus_zero2), // @[PTW.scala:802:19] .io_ptw_gstatus_mpv (_ptw_io_requestor_3_gstatus_mpv), // @[PTW.scala:802:19] .io_ptw_gstatus_gva (_ptw_io_requestor_3_gstatus_gva), // @[PTW.scala:802:19] .io_ptw_gstatus_mbe (_ptw_io_requestor_3_gstatus_mbe), // @[PTW.scala:802:19] .io_ptw_gstatus_sbe (_ptw_io_requestor_3_gstatus_sbe), // @[PTW.scala:802:19] .io_ptw_gstatus_sxl (_ptw_io_requestor_3_gstatus_sxl), // @[PTW.scala:802:19] .io_ptw_gstatus_zero1 (_ptw_io_requestor_3_gstatus_zero1), // @[PTW.scala:802:19] .io_ptw_gstatus_tsr (_ptw_io_requestor_3_gstatus_tsr), // @[PTW.scala:802:19] .io_ptw_gstatus_tw (_ptw_io_requestor_3_gstatus_tw), // @[PTW.scala:802:19] .io_ptw_gstatus_tvm (_ptw_io_requestor_3_gstatus_tvm), // @[PTW.scala:802:19] .io_ptw_gstatus_mxr (_ptw_io_requestor_3_gstatus_mxr), // @[PTW.scala:802:19] .io_ptw_gstatus_sum (_ptw_io_requestor_3_gstatus_sum), // @[PTW.scala:802:19] .io_ptw_gstatus_mprv (_ptw_io_requestor_3_gstatus_mprv), // @[PTW.scala:802:19] .io_ptw_gstatus_fs (_ptw_io_requestor_3_gstatus_fs), // @[PTW.scala:802:19] .io_ptw_gstatus_mpp (_ptw_io_requestor_3_gstatus_mpp), // @[PTW.scala:802:19] .io_ptw_gstatus_vs (_ptw_io_requestor_3_gstatus_vs), // @[PTW.scala:802:19] .io_ptw_gstatus_spp (_ptw_io_requestor_3_gstatus_spp), // @[PTW.scala:802:19] .io_ptw_gstatus_mpie (_ptw_io_requestor_3_gstatus_mpie), // @[PTW.scala:802:19] .io_ptw_gstatus_ube (_ptw_io_requestor_3_gstatus_ube), // @[PTW.scala:802:19] .io_ptw_gstatus_spie (_ptw_io_requestor_3_gstatus_spie), // @[PTW.scala:802:19] .io_ptw_gstatus_upie (_ptw_io_requestor_3_gstatus_upie), // @[PTW.scala:802:19] .io_ptw_gstatus_mie (_ptw_io_requestor_3_gstatus_mie), // @[PTW.scala:802:19] .io_ptw_gstatus_hie (_ptw_io_requestor_3_gstatus_hie), // @[PTW.scala:802:19] .io_ptw_gstatus_sie (_ptw_io_requestor_3_gstatus_sie), // @[PTW.scala:802:19] .io_ptw_gstatus_uie (_ptw_io_requestor_3_gstatus_uie), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_l (_ptw_io_requestor_3_pmp_0_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_a (_ptw_io_requestor_3_pmp_0_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_x (_ptw_io_requestor_3_pmp_0_cfg_x), // @[PTW.scala:802:19] 
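// Remaining ptw requestor 3 outputs (PMP and custom-CSR state) broadcast into the Frontend below.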
.io_ptw_pmp_0_cfg_w (_ptw_io_requestor_3_pmp_0_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_r (_ptw_io_requestor_3_pmp_0_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_0_addr (_ptw_io_requestor_3_pmp_0_addr), // @[PTW.scala:802:19] .io_ptw_pmp_0_mask (_ptw_io_requestor_3_pmp_0_mask), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_l (_ptw_io_requestor_3_pmp_1_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_a (_ptw_io_requestor_3_pmp_1_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_x (_ptw_io_requestor_3_pmp_1_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_w (_ptw_io_requestor_3_pmp_1_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_r (_ptw_io_requestor_3_pmp_1_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_1_addr (_ptw_io_requestor_3_pmp_1_addr), // @[PTW.scala:802:19] .io_ptw_pmp_1_mask (_ptw_io_requestor_3_pmp_1_mask), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_l (_ptw_io_requestor_3_pmp_2_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_a (_ptw_io_requestor_3_pmp_2_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_x (_ptw_io_requestor_3_pmp_2_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_w (_ptw_io_requestor_3_pmp_2_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_r (_ptw_io_requestor_3_pmp_2_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_2_addr (_ptw_io_requestor_3_pmp_2_addr), // @[PTW.scala:802:19] .io_ptw_pmp_2_mask (_ptw_io_requestor_3_pmp_2_mask), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_l (_ptw_io_requestor_3_pmp_3_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_a (_ptw_io_requestor_3_pmp_3_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_x (_ptw_io_requestor_3_pmp_3_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_w (_ptw_io_requestor_3_pmp_3_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_r (_ptw_io_requestor_3_pmp_3_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_3_addr (_ptw_io_requestor_3_pmp_3_addr), // @[PTW.scala:802:19] .io_ptw_pmp_3_mask (_ptw_io_requestor_3_pmp_3_mask), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_l (_ptw_io_requestor_3_pmp_4_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_a (_ptw_io_requestor_3_pmp_4_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_x (_ptw_io_requestor_3_pmp_4_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_w (_ptw_io_requestor_3_pmp_4_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_r (_ptw_io_requestor_3_pmp_4_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_4_addr (_ptw_io_requestor_3_pmp_4_addr), // @[PTW.scala:802:19] .io_ptw_pmp_4_mask (_ptw_io_requestor_3_pmp_4_mask), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_l (_ptw_io_requestor_3_pmp_5_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_a (_ptw_io_requestor_3_pmp_5_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_x (_ptw_io_requestor_3_pmp_5_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_w (_ptw_io_requestor_3_pmp_5_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_r (_ptw_io_requestor_3_pmp_5_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_5_addr (_ptw_io_requestor_3_pmp_5_addr), // @[PTW.scala:802:19] .io_ptw_pmp_5_mask (_ptw_io_requestor_3_pmp_5_mask), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_l (_ptw_io_requestor_3_pmp_6_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_a (_ptw_io_requestor_3_pmp_6_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_x (_ptw_io_requestor_3_pmp_6_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_w (_ptw_io_requestor_3_pmp_6_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_r (_ptw_io_requestor_3_pmp_6_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_6_addr (_ptw_io_requestor_3_pmp_6_addr), // @[PTW.scala:802:19] .io_ptw_pmp_6_mask (_ptw_io_requestor_3_pmp_6_mask), // 
@[PTW.scala:802:19] .io_ptw_pmp_7_cfg_l (_ptw_io_requestor_3_pmp_7_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_a (_ptw_io_requestor_3_pmp_7_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_x (_ptw_io_requestor_3_pmp_7_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_w (_ptw_io_requestor_3_pmp_7_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_r (_ptw_io_requestor_3_pmp_7_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_7_addr (_ptw_io_requestor_3_pmp_7_addr), // @[PTW.scala:802:19] .io_ptw_pmp_7_mask (_ptw_io_requestor_3_pmp_7_mask), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_ren (_ptw_io_requestor_3_customCSRs_csrs_0_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_wen (_ptw_io_requestor_3_customCSRs_csrs_0_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_wdata (_ptw_io_requestor_3_customCSRs_csrs_0_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_value (_ptw_io_requestor_3_customCSRs_csrs_0_value), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_ren (_ptw_io_requestor_3_customCSRs_csrs_1_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_wen (_ptw_io_requestor_3_customCSRs_csrs_1_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_wdata (_ptw_io_requestor_3_customCSRs_csrs_1_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_value (_ptw_io_requestor_3_customCSRs_csrs_1_value), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_ren (_ptw_io_requestor_3_customCSRs_csrs_2_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_wen (_ptw_io_requestor_3_customCSRs_csrs_2_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_wdata (_ptw_io_requestor_3_customCSRs_csrs_2_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_value (_ptw_io_requestor_3_customCSRs_csrs_2_value), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_ren (_ptw_io_requestor_3_customCSRs_csrs_3_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_wen (_ptw_io_requestor_3_customCSRs_csrs_3_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_wdata (_ptw_io_requestor_3_customCSRs_csrs_3_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_value (_ptw_io_requestor_3_customCSRs_csrs_3_value) // @[PTW.scala:802:19] ); // @[Frontend.scala:393:28] TLFragmenter fragmenter ( // @[Fragmenter.scala:345:34] .clock (clock), .reset (reset) ); // @[Fragmenter.scala:345:34] ReRoCCBuffer rerocc_buffer ( // @[Protocol.scala:134:35] .clock (clock), .reset (reset), .auto_in_req_ready (_rerocc_buffer_auto_in_req_ready), .auto_in_req_valid (_rerocc_client_auto_re_ro_cc_out_req_valid), // @[Configs.scala:14:35] .auto_in_req_bits_opcode (_rerocc_client_auto_re_ro_cc_out_req_bits_opcode), // @[Configs.scala:14:35] .auto_in_req_bits_client_id (_rerocc_client_auto_re_ro_cc_out_req_bits_client_id), // @[Configs.scala:14:35] .auto_in_req_bits_manager_id (_rerocc_client_auto_re_ro_cc_out_req_bits_manager_id), // @[Configs.scala:14:35] .auto_in_req_bits_data (_rerocc_client_auto_re_ro_cc_out_req_bits_data), // @[Configs.scala:14:35] .auto_in_resp_ready (_rerocc_client_auto_re_ro_cc_out_resp_ready), // @[Configs.scala:14:35] .auto_in_resp_valid (_rerocc_buffer_auto_in_resp_valid), .auto_in_resp_bits_opcode (_rerocc_buffer_auto_in_resp_bits_opcode), .auto_in_resp_bits_client_id (_rerocc_buffer_auto_in_resp_bits_client_id), .auto_in_resp_bits_manager_id (_rerocc_buffer_auto_in_resp_bits_manager_id), .auto_in_resp_bits_data (_rerocc_buffer_auto_in_resp_bits_data), .auto_out_req_ready (auto_rerocc_buffer_out_req_ready_0), // @[RocketTile.scala:141:7] .auto_out_req_valid (auto_rerocc_buffer_out_req_valid_0), 
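// rerocc_buffer: buffers the ReRoCC request/response channel from the local rerocc client out to the tile's auto_rerocc_buffer_out boundary ports.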
.auto_out_req_bits_opcode (auto_rerocc_buffer_out_req_bits_opcode_0), .auto_out_req_bits_client_id (auto_rerocc_buffer_out_req_bits_client_id_0), .auto_out_req_bits_manager_id (auto_rerocc_buffer_out_req_bits_manager_id_0), .auto_out_req_bits_data (auto_rerocc_buffer_out_req_bits_data_0), .auto_out_resp_ready (auto_rerocc_buffer_out_resp_ready_0), .auto_out_resp_valid (auto_rerocc_buffer_out_resp_valid_0), // @[RocketTile.scala:141:7] .auto_out_resp_bits_opcode (auto_rerocc_buffer_out_resp_bits_opcode_0), // @[RocketTile.scala:141:7] .auto_out_resp_bits_client_id (auto_rerocc_buffer_out_resp_bits_client_id_0), // @[RocketTile.scala:141:7] .auto_out_resp_bits_manager_id (auto_rerocc_buffer_out_resp_bits_manager_id_0), // @[RocketTile.scala:141:7] .auto_out_resp_bits_data (auto_rerocc_buffer_out_resp_bits_data_0) // @[RocketTile.scala:141:7] ); // @[Protocol.scala:134:35] FPU fpuOpt ( // @[RocketTile.scala:242:62] .clock (clock), .reset (reset), .io_hartid (_core_io_fpu_hartid), // @[RocketTile.scala:147:20] .io_time (_core_io_fpu_time), // @[RocketTile.scala:147:20] .io_inst (_core_io_fpu_inst), // @[RocketTile.scala:147:20] .io_fromint_data (_core_io_fpu_fromint_data), // @[RocketTile.scala:147:20] .io_fcsr_rm (_core_io_fpu_fcsr_rm), // @[RocketTile.scala:147:20] .io_fcsr_flags_valid (_fpuOpt_io_fcsr_flags_valid), .io_fcsr_flags_bits (_fpuOpt_io_fcsr_flags_bits), .io_store_data (_fpuOpt_io_store_data), .io_toint_data (_fpuOpt_io_toint_data), .io_ll_resp_val (_core_io_fpu_ll_resp_val), // @[RocketTile.scala:147:20] .io_ll_resp_type (_core_io_fpu_ll_resp_type), // @[RocketTile.scala:147:20] .io_ll_resp_tag (_core_io_fpu_ll_resp_tag), // @[RocketTile.scala:147:20] .io_ll_resp_data (_core_io_fpu_ll_resp_data), // @[RocketTile.scala:147:20] .io_valid (_core_io_fpu_valid), // @[RocketTile.scala:147:20] .io_fcsr_rdy (_fpuOpt_io_fcsr_rdy), .io_nack_mem (_fpuOpt_io_nack_mem), .io_illegal_rm (_fpuOpt_io_illegal_rm), .io_killx (_core_io_fpu_killx), // @[RocketTile.scala:147:20] .io_killm (_core_io_fpu_killm), // @[RocketTile.scala:147:20] .io_dec_ldst (_fpuOpt_io_dec_ldst), .io_dec_wen (_fpuOpt_io_dec_wen), .io_dec_ren1 (_fpuOpt_io_dec_ren1), .io_dec_ren2 (_fpuOpt_io_dec_ren2), .io_dec_ren3 (_fpuOpt_io_dec_ren3), .io_dec_swap12 (_fpuOpt_io_dec_swap12), .io_dec_swap23 (_fpuOpt_io_dec_swap23), .io_dec_typeTagIn (_fpuOpt_io_dec_typeTagIn), .io_dec_typeTagOut (_fpuOpt_io_dec_typeTagOut), .io_dec_fromint (_fpuOpt_io_dec_fromint), .io_dec_toint (_fpuOpt_io_dec_toint), .io_dec_fastpipe (_fpuOpt_io_dec_fastpipe), .io_dec_fma (_fpuOpt_io_dec_fma), .io_dec_div (_fpuOpt_io_dec_div), .io_dec_sqrt (_fpuOpt_io_dec_sqrt), .io_dec_wflags (_fpuOpt_io_dec_wflags), .io_dec_vec (_fpuOpt_io_dec_vec), .io_sboard_set (_fpuOpt_io_sboard_set), .io_sboard_clr (_fpuOpt_io_sboard_clr), .io_sboard_clra (_fpuOpt_io_sboard_clra), .io_keep_clock_enabled (_core_io_fpu_keep_clock_enabled) // @[RocketTile.scala:147:20] ); // @[RocketTile.scala:242:62] HellaCacheArbiter dcacheArb ( // @[HellaCache.scala:292:25] .clock (clock), .reset (reset), .io_requestor_0_req_ready (_dcacheArb_io_requestor_0_req_ready), .io_requestor_0_req_valid (_ptw_io_mem_req_valid), // @[PTW.scala:802:19] .io_requestor_0_req_bits_addr (_ptw_io_mem_req_bits_addr), // @[PTW.scala:802:19] .io_requestor_0_req_bits_dv (_ptw_io_mem_req_bits_dv), // @[PTW.scala:802:19] .io_requestor_0_s1_kill (_ptw_io_mem_s1_kill), // @[PTW.scala:802:19] .io_requestor_0_s2_nack (_dcacheArb_io_requestor_0_s2_nack), .io_requestor_0_s2_nack_cause_raw 
(_dcacheArb_io_requestor_0_s2_nack_cause_raw), .io_requestor_0_s2_uncached (_dcacheArb_io_requestor_0_s2_uncached), .io_requestor_0_s2_paddr (_dcacheArb_io_requestor_0_s2_paddr), .io_requestor_0_resp_valid (_dcacheArb_io_requestor_0_resp_valid), .io_requestor_0_resp_bits_addr (_dcacheArb_io_requestor_0_resp_bits_addr), .io_requestor_0_resp_bits_tag (_dcacheArb_io_requestor_0_resp_bits_tag), .io_requestor_0_resp_bits_cmd (_dcacheArb_io_requestor_0_resp_bits_cmd), .io_requestor_0_resp_bits_size (_dcacheArb_io_requestor_0_resp_bits_size), .io_requestor_0_resp_bits_signed (_dcacheArb_io_requestor_0_resp_bits_signed), .io_requestor_0_resp_bits_dprv (_dcacheArb_io_requestor_0_resp_bits_dprv), .io_requestor_0_resp_bits_dv (_dcacheArb_io_requestor_0_resp_bits_dv), .io_requestor_0_resp_bits_data (_dcacheArb_io_requestor_0_resp_bits_data), .io_requestor_0_resp_bits_mask (_dcacheArb_io_requestor_0_resp_bits_mask), .io_requestor_0_resp_bits_replay (_dcacheArb_io_requestor_0_resp_bits_replay), .io_requestor_0_resp_bits_has_data (_dcacheArb_io_requestor_0_resp_bits_has_data), .io_requestor_0_resp_bits_data_word_bypass (_dcacheArb_io_requestor_0_resp_bits_data_word_bypass), .io_requestor_0_resp_bits_data_raw (_dcacheArb_io_requestor_0_resp_bits_data_raw), .io_requestor_0_resp_bits_store_data (_dcacheArb_io_requestor_0_resp_bits_store_data), .io_requestor_0_replay_next (_dcacheArb_io_requestor_0_replay_next), .io_requestor_0_s2_xcpt_ma_ld (_dcacheArb_io_requestor_0_s2_xcpt_ma_ld), .io_requestor_0_s2_xcpt_ma_st (_dcacheArb_io_requestor_0_s2_xcpt_ma_st), .io_requestor_0_s2_xcpt_pf_ld (_dcacheArb_io_requestor_0_s2_xcpt_pf_ld), .io_requestor_0_s2_xcpt_pf_st (_dcacheArb_io_requestor_0_s2_xcpt_pf_st), .io_requestor_0_s2_xcpt_ae_ld (_dcacheArb_io_requestor_0_s2_xcpt_ae_ld), .io_requestor_0_s2_xcpt_ae_st (_dcacheArb_io_requestor_0_s2_xcpt_ae_st), .io_requestor_0_s2_gpa (_dcacheArb_io_requestor_0_s2_gpa), .io_requestor_0_ordered (_dcacheArb_io_requestor_0_ordered), .io_requestor_0_store_pending (_dcacheArb_io_requestor_0_store_pending), .io_requestor_0_perf_acquire (_dcacheArb_io_requestor_0_perf_acquire), .io_requestor_0_perf_release (_dcacheArb_io_requestor_0_perf_release), .io_requestor_0_perf_grant (_dcacheArb_io_requestor_0_perf_grant), .io_requestor_0_perf_tlbMiss (_dcacheArb_io_requestor_0_perf_tlbMiss), .io_requestor_0_perf_blocked (_dcacheArb_io_requestor_0_perf_blocked), .io_requestor_0_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad), .io_requestor_0_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW), .io_requestor_0_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad), .io_requestor_0_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad), .io_requestor_0_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore), .io_requestor_1_req_ready (_dcacheArb_io_requestor_1_req_ready), .io_requestor_1_req_valid (_dcIF_io_cache_req_valid), // @[LazyRoCC.scala:106:24] .io_requestor_1_s1_data_data (_dcIF_io_cache_s1_data_data), // @[LazyRoCC.scala:106:24] .io_requestor_1_s1_data_mask (_dcIF_io_cache_s1_data_mask), // @[LazyRoCC.scala:106:24] .io_requestor_1_s2_nack (_dcacheArb_io_requestor_1_s2_nack), .io_requestor_1_s2_nack_cause_raw (_dcacheArb_io_requestor_1_s2_nack_cause_raw), .io_requestor_1_s2_uncached (_dcacheArb_io_requestor_1_s2_uncached), .io_requestor_1_s2_paddr (_dcacheArb_io_requestor_1_s2_paddr), .io_requestor_1_resp_valid 
(_dcacheArb_io_requestor_1_resp_valid), .io_requestor_1_resp_bits_addr (_dcacheArb_io_requestor_1_resp_bits_addr), .io_requestor_1_resp_bits_tag (_dcacheArb_io_requestor_1_resp_bits_tag), .io_requestor_1_resp_bits_cmd (_dcacheArb_io_requestor_1_resp_bits_cmd), .io_requestor_1_resp_bits_size (_dcacheArb_io_requestor_1_resp_bits_size), .io_requestor_1_resp_bits_signed (_dcacheArb_io_requestor_1_resp_bits_signed), .io_requestor_1_resp_bits_dprv (_dcacheArb_io_requestor_1_resp_bits_dprv), .io_requestor_1_resp_bits_dv (_dcacheArb_io_requestor_1_resp_bits_dv), .io_requestor_1_resp_bits_data (_dcacheArb_io_requestor_1_resp_bits_data), .io_requestor_1_resp_bits_mask (_dcacheArb_io_requestor_1_resp_bits_mask), .io_requestor_1_resp_bits_replay (_dcacheArb_io_requestor_1_resp_bits_replay), .io_requestor_1_resp_bits_has_data (_dcacheArb_io_requestor_1_resp_bits_has_data), .io_requestor_1_resp_bits_data_word_bypass (_dcacheArb_io_requestor_1_resp_bits_data_word_bypass), .io_requestor_1_resp_bits_data_raw (_dcacheArb_io_requestor_1_resp_bits_data_raw), .io_requestor_1_resp_bits_store_data (_dcacheArb_io_requestor_1_resp_bits_store_data), .io_requestor_1_replay_next (_dcacheArb_io_requestor_1_replay_next), .io_requestor_1_s2_xcpt_ma_ld (_dcacheArb_io_requestor_1_s2_xcpt_ma_ld), .io_requestor_1_s2_xcpt_ma_st (_dcacheArb_io_requestor_1_s2_xcpt_ma_st), .io_requestor_1_s2_xcpt_pf_ld (_dcacheArb_io_requestor_1_s2_xcpt_pf_ld), .io_requestor_1_s2_xcpt_pf_st (_dcacheArb_io_requestor_1_s2_xcpt_pf_st), .io_requestor_1_s2_xcpt_ae_ld (_dcacheArb_io_requestor_1_s2_xcpt_ae_ld), .io_requestor_1_s2_xcpt_ae_st (_dcacheArb_io_requestor_1_s2_xcpt_ae_st), .io_requestor_1_s2_gpa (_dcacheArb_io_requestor_1_s2_gpa), .io_requestor_1_ordered (_dcacheArb_io_requestor_1_ordered), .io_requestor_1_store_pending (_dcacheArb_io_requestor_1_store_pending), .io_requestor_1_perf_acquire (_dcacheArb_io_requestor_1_perf_acquire), .io_requestor_1_perf_release (_dcacheArb_io_requestor_1_perf_release), .io_requestor_1_perf_grant (_dcacheArb_io_requestor_1_perf_grant), .io_requestor_1_perf_tlbMiss (_dcacheArb_io_requestor_1_perf_tlbMiss), .io_requestor_1_perf_blocked (_dcacheArb_io_requestor_1_perf_blocked), .io_requestor_1_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad), .io_requestor_1_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW), .io_requestor_1_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad), .io_requestor_1_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad), .io_requestor_1_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore), .io_requestor_2_req_ready (_dcacheArb_io_requestor_2_req_ready), .io_requestor_2_req_valid (_core_io_dmem_req_valid), // @[RocketTile.scala:147:20] .io_requestor_2_req_bits_addr (_core_io_dmem_req_bits_addr), // @[RocketTile.scala:147:20] .io_requestor_2_req_bits_tag (_core_io_dmem_req_bits_tag), // @[RocketTile.scala:147:20] .io_requestor_2_req_bits_cmd (_core_io_dmem_req_bits_cmd), // @[RocketTile.scala:147:20] .io_requestor_2_req_bits_size (_core_io_dmem_req_bits_size), // @[RocketTile.scala:147:20] .io_requestor_2_req_bits_signed (_core_io_dmem_req_bits_signed), // @[RocketTile.scala:147:20] .io_requestor_2_req_bits_dprv (_core_io_dmem_req_bits_dprv), // @[RocketTile.scala:147:20] .io_requestor_2_req_bits_dv (_core_io_dmem_req_bits_dv), // @[RocketTile.scala:147:20] .io_requestor_2_req_bits_no_resp 
(_core_io_dmem_req_bits_no_resp), // @[RocketTile.scala:147:20] .io_requestor_2_s1_kill (_core_io_dmem_s1_kill), // @[RocketTile.scala:147:20] .io_requestor_2_s1_data_data (_core_io_dmem_s1_data_data), // @[RocketTile.scala:147:20] .io_requestor_2_s2_nack (_dcacheArb_io_requestor_2_s2_nack), .io_requestor_2_s2_nack_cause_raw (_dcacheArb_io_requestor_2_s2_nack_cause_raw), .io_requestor_2_s2_uncached (_dcacheArb_io_requestor_2_s2_uncached), .io_requestor_2_s2_paddr (_dcacheArb_io_requestor_2_s2_paddr), .io_requestor_2_resp_valid (_dcacheArb_io_requestor_2_resp_valid), .io_requestor_2_resp_bits_addr (_dcacheArb_io_requestor_2_resp_bits_addr), .io_requestor_2_resp_bits_tag (_dcacheArb_io_requestor_2_resp_bits_tag), .io_requestor_2_resp_bits_cmd (_dcacheArb_io_requestor_2_resp_bits_cmd), .io_requestor_2_resp_bits_size (_dcacheArb_io_requestor_2_resp_bits_size), .io_requestor_2_resp_bits_signed (_dcacheArb_io_requestor_2_resp_bits_signed), .io_requestor_2_resp_bits_dprv (_dcacheArb_io_requestor_2_resp_bits_dprv), .io_requestor_2_resp_bits_dv (_dcacheArb_io_requestor_2_resp_bits_dv), .io_requestor_2_resp_bits_data (_dcacheArb_io_requestor_2_resp_bits_data), .io_requestor_2_resp_bits_mask (_dcacheArb_io_requestor_2_resp_bits_mask), .io_requestor_2_resp_bits_replay (_dcacheArb_io_requestor_2_resp_bits_replay), .io_requestor_2_resp_bits_has_data (_dcacheArb_io_requestor_2_resp_bits_has_data), .io_requestor_2_resp_bits_data_word_bypass (_dcacheArb_io_requestor_2_resp_bits_data_word_bypass), .io_requestor_2_resp_bits_data_raw (_dcacheArb_io_requestor_2_resp_bits_data_raw), .io_requestor_2_resp_bits_store_data (_dcacheArb_io_requestor_2_resp_bits_store_data), .io_requestor_2_replay_next (_dcacheArb_io_requestor_2_replay_next), .io_requestor_2_s2_xcpt_ma_ld (_dcacheArb_io_requestor_2_s2_xcpt_ma_ld), .io_requestor_2_s2_xcpt_ma_st (_dcacheArb_io_requestor_2_s2_xcpt_ma_st), .io_requestor_2_s2_xcpt_pf_ld (_dcacheArb_io_requestor_2_s2_xcpt_pf_ld), .io_requestor_2_s2_xcpt_pf_st (_dcacheArb_io_requestor_2_s2_xcpt_pf_st), .io_requestor_2_s2_xcpt_ae_ld (_dcacheArb_io_requestor_2_s2_xcpt_ae_ld), .io_requestor_2_s2_xcpt_ae_st (_dcacheArb_io_requestor_2_s2_xcpt_ae_st), .io_requestor_2_s2_gpa (_dcacheArb_io_requestor_2_s2_gpa), .io_requestor_2_ordered (_dcacheArb_io_requestor_2_ordered), .io_requestor_2_store_pending (_dcacheArb_io_requestor_2_store_pending), .io_requestor_2_perf_acquire (_dcacheArb_io_requestor_2_perf_acquire), .io_requestor_2_perf_release (_dcacheArb_io_requestor_2_perf_release), .io_requestor_2_perf_grant (_dcacheArb_io_requestor_2_perf_grant), .io_requestor_2_perf_tlbMiss (_dcacheArb_io_requestor_2_perf_tlbMiss), .io_requestor_2_perf_blocked (_dcacheArb_io_requestor_2_perf_blocked), .io_requestor_2_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_2_perf_canAcceptStoreThenLoad), .io_requestor_2_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_2_perf_canAcceptStoreThenRMW), .io_requestor_2_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_2_perf_canAcceptLoadThenLoad), .io_requestor_2_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_2_perf_storeBufferEmptyAfterLoad), .io_requestor_2_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_2_perf_storeBufferEmptyAfterStore), .io_requestor_2_keep_clock_enabled (_core_io_dmem_keep_clock_enabled), // @[RocketTile.scala:147:20] .io_mem_req_ready (_dcache_io_cpu_req_ready), // @[HellaCache.scala:278:43] .io_mem_req_valid (_dcacheArb_io_mem_req_valid), .io_mem_req_bits_addr (_dcacheArb_io_mem_req_bits_addr), .io_mem_req_bits_tag 
(_dcacheArb_io_mem_req_bits_tag), .io_mem_req_bits_cmd (_dcacheArb_io_mem_req_bits_cmd), .io_mem_req_bits_size (_dcacheArb_io_mem_req_bits_size), .io_mem_req_bits_signed (_dcacheArb_io_mem_req_bits_signed), .io_mem_req_bits_dprv (_dcacheArb_io_mem_req_bits_dprv), .io_mem_req_bits_dv (_dcacheArb_io_mem_req_bits_dv), .io_mem_req_bits_phys (_dcacheArb_io_mem_req_bits_phys), .io_mem_req_bits_no_resp (_dcacheArb_io_mem_req_bits_no_resp), .io_mem_s1_kill (_dcacheArb_io_mem_s1_kill), .io_mem_s1_data_data (_dcacheArb_io_mem_s1_data_data), .io_mem_s1_data_mask (_dcacheArb_io_mem_s1_data_mask), .io_mem_s2_nack (_dcache_io_cpu_s2_nack), // @[HellaCache.scala:278:43] .io_mem_s2_nack_cause_raw (_dcache_io_cpu_s2_nack_cause_raw), // @[HellaCache.scala:278:43] .io_mem_s2_uncached (_dcache_io_cpu_s2_uncached), // @[HellaCache.scala:278:43] .io_mem_s2_paddr (_dcache_io_cpu_s2_paddr), // @[HellaCache.scala:278:43] .io_mem_resp_valid (_dcache_io_cpu_resp_valid), // @[HellaCache.scala:278:43] .io_mem_resp_bits_addr (_dcache_io_cpu_resp_bits_addr), // @[HellaCache.scala:278:43] .io_mem_resp_bits_tag (_dcache_io_cpu_resp_bits_tag), // @[HellaCache.scala:278:43] .io_mem_resp_bits_cmd (_dcache_io_cpu_resp_bits_cmd), // @[HellaCache.scala:278:43] .io_mem_resp_bits_size (_dcache_io_cpu_resp_bits_size), // @[HellaCache.scala:278:43] .io_mem_resp_bits_signed (_dcache_io_cpu_resp_bits_signed), // @[HellaCache.scala:278:43] .io_mem_resp_bits_dprv (_dcache_io_cpu_resp_bits_dprv), // @[HellaCache.scala:278:43] .io_mem_resp_bits_dv (_dcache_io_cpu_resp_bits_dv), // @[HellaCache.scala:278:43] .io_mem_resp_bits_data (_dcache_io_cpu_resp_bits_data), // @[HellaCache.scala:278:43] .io_mem_resp_bits_mask (_dcache_io_cpu_resp_bits_mask), // @[HellaCache.scala:278:43] .io_mem_resp_bits_replay (_dcache_io_cpu_resp_bits_replay), // @[HellaCache.scala:278:43] .io_mem_resp_bits_has_data (_dcache_io_cpu_resp_bits_has_data), // @[HellaCache.scala:278:43] .io_mem_resp_bits_data_word_bypass (_dcache_io_cpu_resp_bits_data_word_bypass), // @[HellaCache.scala:278:43] .io_mem_resp_bits_data_raw (_dcache_io_cpu_resp_bits_data_raw), // @[HellaCache.scala:278:43] .io_mem_resp_bits_store_data (_dcache_io_cpu_resp_bits_store_data), // @[HellaCache.scala:278:43] .io_mem_replay_next (_dcache_io_cpu_replay_next), // @[HellaCache.scala:278:43] .io_mem_s2_xcpt_ma_ld (_dcache_io_cpu_s2_xcpt_ma_ld), // @[HellaCache.scala:278:43] .io_mem_s2_xcpt_ma_st (_dcache_io_cpu_s2_xcpt_ma_st), // @[HellaCache.scala:278:43] .io_mem_s2_xcpt_pf_ld (_dcache_io_cpu_s2_xcpt_pf_ld), // @[HellaCache.scala:278:43] .io_mem_s2_xcpt_pf_st (_dcache_io_cpu_s2_xcpt_pf_st), // @[HellaCache.scala:278:43] .io_mem_s2_xcpt_ae_ld (_dcache_io_cpu_s2_xcpt_ae_ld), // @[HellaCache.scala:278:43] .io_mem_s2_xcpt_ae_st (_dcache_io_cpu_s2_xcpt_ae_st), // @[HellaCache.scala:278:43] .io_mem_s2_gpa (_dcache_io_cpu_s2_gpa), // @[HellaCache.scala:278:43] .io_mem_ordered (_dcache_io_cpu_ordered), // @[HellaCache.scala:278:43] .io_mem_store_pending (_dcache_io_cpu_store_pending), // @[HellaCache.scala:278:43] .io_mem_perf_acquire (_dcache_io_cpu_perf_acquire), // @[HellaCache.scala:278:43] .io_mem_perf_release (_dcache_io_cpu_perf_release), // @[HellaCache.scala:278:43] .io_mem_perf_grant (_dcache_io_cpu_perf_grant), // @[HellaCache.scala:278:43] .io_mem_perf_tlbMiss (_dcache_io_cpu_perf_tlbMiss), // @[HellaCache.scala:278:43] .io_mem_perf_blocked (_dcache_io_cpu_perf_blocked), // @[HellaCache.scala:278:43] .io_mem_perf_canAcceptStoreThenLoad (_dcache_io_cpu_perf_canAcceptStoreThenLoad), // 
@[HellaCache.scala:278:43] .io_mem_perf_canAcceptStoreThenRMW (_dcache_io_cpu_perf_canAcceptStoreThenRMW), // @[HellaCache.scala:278:43] .io_mem_perf_canAcceptLoadThenLoad (_dcache_io_cpu_perf_canAcceptLoadThenLoad), // @[HellaCache.scala:278:43] .io_mem_perf_storeBufferEmptyAfterLoad (_dcache_io_cpu_perf_storeBufferEmptyAfterLoad), // @[HellaCache.scala:278:43] .io_mem_perf_storeBufferEmptyAfterStore (_dcache_io_cpu_perf_storeBufferEmptyAfterStore), // @[HellaCache.scala:278:43] .io_mem_keep_clock_enabled (_dcacheArb_io_mem_keep_clock_enabled) ); // @[HellaCache.scala:292:25] PTW ptw ( // @[PTW.scala:802:19] .clock (clock), .reset (reset), .io_requestor_0_req_ready (_ptw_io_requestor_0_req_ready), .io_requestor_0_resp_valid (_ptw_io_requestor_0_resp_valid), .io_requestor_0_resp_bits_ae_ptw (_ptw_io_requestor_0_resp_bits_ae_ptw), .io_requestor_0_resp_bits_ae_final (_ptw_io_requestor_0_resp_bits_ae_final), .io_requestor_0_resp_bits_pf (_ptw_io_requestor_0_resp_bits_pf), .io_requestor_0_resp_bits_gf (_ptw_io_requestor_0_resp_bits_gf), .io_requestor_0_resp_bits_hr (_ptw_io_requestor_0_resp_bits_hr), .io_requestor_0_resp_bits_hw (_ptw_io_requestor_0_resp_bits_hw), .io_requestor_0_resp_bits_hx (_ptw_io_requestor_0_resp_bits_hx), .io_requestor_0_resp_bits_pte_reserved_for_future (_ptw_io_requestor_0_resp_bits_pte_reserved_for_future), .io_requestor_0_resp_bits_pte_ppn (_ptw_io_requestor_0_resp_bits_pte_ppn), .io_requestor_0_resp_bits_pte_reserved_for_software (_ptw_io_requestor_0_resp_bits_pte_reserved_for_software), .io_requestor_0_resp_bits_pte_d (_ptw_io_requestor_0_resp_bits_pte_d), .io_requestor_0_resp_bits_pte_a (_ptw_io_requestor_0_resp_bits_pte_a), .io_requestor_0_resp_bits_pte_g (_ptw_io_requestor_0_resp_bits_pte_g), .io_requestor_0_resp_bits_pte_u (_ptw_io_requestor_0_resp_bits_pte_u), .io_requestor_0_resp_bits_pte_x (_ptw_io_requestor_0_resp_bits_pte_x), .io_requestor_0_resp_bits_pte_w (_ptw_io_requestor_0_resp_bits_pte_w), .io_requestor_0_resp_bits_pte_r (_ptw_io_requestor_0_resp_bits_pte_r), .io_requestor_0_resp_bits_pte_v (_ptw_io_requestor_0_resp_bits_pte_v), .io_requestor_0_resp_bits_level (_ptw_io_requestor_0_resp_bits_level), .io_requestor_0_resp_bits_homogeneous (_ptw_io_requestor_0_resp_bits_homogeneous), .io_requestor_0_resp_bits_gpa_valid (_ptw_io_requestor_0_resp_bits_gpa_valid), .io_requestor_0_resp_bits_gpa_bits (_ptw_io_requestor_0_resp_bits_gpa_bits), .io_requestor_0_resp_bits_gpa_is_pte (_ptw_io_requestor_0_resp_bits_gpa_is_pte), .io_requestor_0_ptbr_mode (_ptw_io_requestor_0_ptbr_mode), .io_requestor_0_ptbr_ppn (_ptw_io_requestor_0_ptbr_ppn), .io_requestor_0_status_debug (_ptw_io_requestor_0_status_debug), .io_requestor_0_status_cease (_ptw_io_requestor_0_status_cease), .io_requestor_0_status_wfi (_ptw_io_requestor_0_status_wfi), .io_requestor_0_status_isa (_ptw_io_requestor_0_status_isa), .io_requestor_0_status_dprv (_ptw_io_requestor_0_status_dprv), .io_requestor_0_status_dv (_ptw_io_requestor_0_status_dv), .io_requestor_0_status_prv (_ptw_io_requestor_0_status_prv), .io_requestor_0_status_v (_ptw_io_requestor_0_status_v), .io_requestor_0_status_mpv (_ptw_io_requestor_0_status_mpv), .io_requestor_0_status_gva (_ptw_io_requestor_0_status_gva), .io_requestor_0_status_tsr (_ptw_io_requestor_0_status_tsr), .io_requestor_0_status_tw (_ptw_io_requestor_0_status_tw), .io_requestor_0_status_tvm (_ptw_io_requestor_0_status_tvm), .io_requestor_0_status_mxr (_ptw_io_requestor_0_status_mxr), .io_requestor_0_status_sum (_ptw_io_requestor_0_status_sum), 
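// ptw: shared page-table walker; each io_requestor_N group is one TLB client (requestor 2 is driven by the D-cache, requestor 3 by the Frontend), and its io_mem walk requests reach the data cache through dcacheArb requestor 0.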
.io_requestor_0_status_mprv (_ptw_io_requestor_0_status_mprv), .io_requestor_0_status_fs (_ptw_io_requestor_0_status_fs), .io_requestor_0_status_mpp (_ptw_io_requestor_0_status_mpp), .io_requestor_0_status_spp (_ptw_io_requestor_0_status_spp), .io_requestor_0_status_mpie (_ptw_io_requestor_0_status_mpie), .io_requestor_0_status_spie (_ptw_io_requestor_0_status_spie), .io_requestor_0_status_mie (_ptw_io_requestor_0_status_mie), .io_requestor_0_status_sie (_ptw_io_requestor_0_status_sie), .io_requestor_0_hstatus_spvp (_ptw_io_requestor_0_hstatus_spvp), .io_requestor_0_hstatus_spv (_ptw_io_requestor_0_hstatus_spv), .io_requestor_0_hstatus_gva (_ptw_io_requestor_0_hstatus_gva), .io_requestor_0_gstatus_debug (_ptw_io_requestor_0_gstatus_debug), .io_requestor_0_gstatus_cease (_ptw_io_requestor_0_gstatus_cease), .io_requestor_0_gstatus_wfi (_ptw_io_requestor_0_gstatus_wfi), .io_requestor_0_gstatus_isa (_ptw_io_requestor_0_gstatus_isa), .io_requestor_0_gstatus_dprv (_ptw_io_requestor_0_gstatus_dprv), .io_requestor_0_gstatus_dv (_ptw_io_requestor_0_gstatus_dv), .io_requestor_0_gstatus_prv (_ptw_io_requestor_0_gstatus_prv), .io_requestor_0_gstatus_v (_ptw_io_requestor_0_gstatus_v), .io_requestor_0_gstatus_zero2 (_ptw_io_requestor_0_gstatus_zero2), .io_requestor_0_gstatus_mpv (_ptw_io_requestor_0_gstatus_mpv), .io_requestor_0_gstatus_gva (_ptw_io_requestor_0_gstatus_gva), .io_requestor_0_gstatus_mbe (_ptw_io_requestor_0_gstatus_mbe), .io_requestor_0_gstatus_sbe (_ptw_io_requestor_0_gstatus_sbe), .io_requestor_0_gstatus_sxl (_ptw_io_requestor_0_gstatus_sxl), .io_requestor_0_gstatus_zero1 (_ptw_io_requestor_0_gstatus_zero1), .io_requestor_0_gstatus_tsr (_ptw_io_requestor_0_gstatus_tsr), .io_requestor_0_gstatus_tw (_ptw_io_requestor_0_gstatus_tw), .io_requestor_0_gstatus_tvm (_ptw_io_requestor_0_gstatus_tvm), .io_requestor_0_gstatus_mxr (_ptw_io_requestor_0_gstatus_mxr), .io_requestor_0_gstatus_sum (_ptw_io_requestor_0_gstatus_sum), .io_requestor_0_gstatus_mprv (_ptw_io_requestor_0_gstatus_mprv), .io_requestor_0_gstatus_fs (_ptw_io_requestor_0_gstatus_fs), .io_requestor_0_gstatus_mpp (_ptw_io_requestor_0_gstatus_mpp), .io_requestor_0_gstatus_vs (_ptw_io_requestor_0_gstatus_vs), .io_requestor_0_gstatus_spp (_ptw_io_requestor_0_gstatus_spp), .io_requestor_0_gstatus_mpie (_ptw_io_requestor_0_gstatus_mpie), .io_requestor_0_gstatus_ube (_ptw_io_requestor_0_gstatus_ube), .io_requestor_0_gstatus_spie (_ptw_io_requestor_0_gstatus_spie), .io_requestor_0_gstatus_upie (_ptw_io_requestor_0_gstatus_upie), .io_requestor_0_gstatus_mie (_ptw_io_requestor_0_gstatus_mie), .io_requestor_0_gstatus_hie (_ptw_io_requestor_0_gstatus_hie), .io_requestor_0_gstatus_sie (_ptw_io_requestor_0_gstatus_sie), .io_requestor_0_gstatus_uie (_ptw_io_requestor_0_gstatus_uie), .io_requestor_0_pmp_0_cfg_l (_ptw_io_requestor_0_pmp_0_cfg_l), .io_requestor_0_pmp_0_cfg_a (_ptw_io_requestor_0_pmp_0_cfg_a), .io_requestor_0_pmp_0_cfg_x (_ptw_io_requestor_0_pmp_0_cfg_x), .io_requestor_0_pmp_0_cfg_w (_ptw_io_requestor_0_pmp_0_cfg_w), .io_requestor_0_pmp_0_cfg_r (_ptw_io_requestor_0_pmp_0_cfg_r), .io_requestor_0_pmp_0_addr (_ptw_io_requestor_0_pmp_0_addr), .io_requestor_0_pmp_0_mask (_ptw_io_requestor_0_pmp_0_mask), .io_requestor_0_pmp_1_cfg_l (_ptw_io_requestor_0_pmp_1_cfg_l), .io_requestor_0_pmp_1_cfg_a (_ptw_io_requestor_0_pmp_1_cfg_a), .io_requestor_0_pmp_1_cfg_x (_ptw_io_requestor_0_pmp_1_cfg_x), .io_requestor_0_pmp_1_cfg_w (_ptw_io_requestor_0_pmp_1_cfg_w), .io_requestor_0_pmp_1_cfg_r (_ptw_io_requestor_0_pmp_1_cfg_r), .io_requestor_0_pmp_1_addr 
(_ptw_io_requestor_0_pmp_1_addr), .io_requestor_0_pmp_1_mask (_ptw_io_requestor_0_pmp_1_mask), .io_requestor_0_pmp_2_cfg_l (_ptw_io_requestor_0_pmp_2_cfg_l), .io_requestor_0_pmp_2_cfg_a (_ptw_io_requestor_0_pmp_2_cfg_a), .io_requestor_0_pmp_2_cfg_x (_ptw_io_requestor_0_pmp_2_cfg_x), .io_requestor_0_pmp_2_cfg_w (_ptw_io_requestor_0_pmp_2_cfg_w), .io_requestor_0_pmp_2_cfg_r (_ptw_io_requestor_0_pmp_2_cfg_r), .io_requestor_0_pmp_2_addr (_ptw_io_requestor_0_pmp_2_addr), .io_requestor_0_pmp_2_mask (_ptw_io_requestor_0_pmp_2_mask), .io_requestor_0_pmp_3_cfg_l (_ptw_io_requestor_0_pmp_3_cfg_l), .io_requestor_0_pmp_3_cfg_a (_ptw_io_requestor_0_pmp_3_cfg_a), .io_requestor_0_pmp_3_cfg_x (_ptw_io_requestor_0_pmp_3_cfg_x), .io_requestor_0_pmp_3_cfg_w (_ptw_io_requestor_0_pmp_3_cfg_w), .io_requestor_0_pmp_3_cfg_r (_ptw_io_requestor_0_pmp_3_cfg_r), .io_requestor_0_pmp_3_addr (_ptw_io_requestor_0_pmp_3_addr), .io_requestor_0_pmp_3_mask (_ptw_io_requestor_0_pmp_3_mask), .io_requestor_0_pmp_4_cfg_l (_ptw_io_requestor_0_pmp_4_cfg_l), .io_requestor_0_pmp_4_cfg_a (_ptw_io_requestor_0_pmp_4_cfg_a), .io_requestor_0_pmp_4_cfg_x (_ptw_io_requestor_0_pmp_4_cfg_x), .io_requestor_0_pmp_4_cfg_w (_ptw_io_requestor_0_pmp_4_cfg_w), .io_requestor_0_pmp_4_cfg_r (_ptw_io_requestor_0_pmp_4_cfg_r), .io_requestor_0_pmp_4_addr (_ptw_io_requestor_0_pmp_4_addr), .io_requestor_0_pmp_4_mask (_ptw_io_requestor_0_pmp_4_mask), .io_requestor_0_pmp_5_cfg_l (_ptw_io_requestor_0_pmp_5_cfg_l), .io_requestor_0_pmp_5_cfg_a (_ptw_io_requestor_0_pmp_5_cfg_a), .io_requestor_0_pmp_5_cfg_x (_ptw_io_requestor_0_pmp_5_cfg_x), .io_requestor_0_pmp_5_cfg_w (_ptw_io_requestor_0_pmp_5_cfg_w), .io_requestor_0_pmp_5_cfg_r (_ptw_io_requestor_0_pmp_5_cfg_r), .io_requestor_0_pmp_5_addr (_ptw_io_requestor_0_pmp_5_addr), .io_requestor_0_pmp_5_mask (_ptw_io_requestor_0_pmp_5_mask), .io_requestor_0_pmp_6_cfg_l (_ptw_io_requestor_0_pmp_6_cfg_l), .io_requestor_0_pmp_6_cfg_a (_ptw_io_requestor_0_pmp_6_cfg_a), .io_requestor_0_pmp_6_cfg_x (_ptw_io_requestor_0_pmp_6_cfg_x), .io_requestor_0_pmp_6_cfg_w (_ptw_io_requestor_0_pmp_6_cfg_w), .io_requestor_0_pmp_6_cfg_r (_ptw_io_requestor_0_pmp_6_cfg_r), .io_requestor_0_pmp_6_addr (_ptw_io_requestor_0_pmp_6_addr), .io_requestor_0_pmp_6_mask (_ptw_io_requestor_0_pmp_6_mask), .io_requestor_0_pmp_7_cfg_l (_ptw_io_requestor_0_pmp_7_cfg_l), .io_requestor_0_pmp_7_cfg_a (_ptw_io_requestor_0_pmp_7_cfg_a), .io_requestor_0_pmp_7_cfg_x (_ptw_io_requestor_0_pmp_7_cfg_x), .io_requestor_0_pmp_7_cfg_w (_ptw_io_requestor_0_pmp_7_cfg_w), .io_requestor_0_pmp_7_cfg_r (_ptw_io_requestor_0_pmp_7_cfg_r), .io_requestor_0_pmp_7_addr (_ptw_io_requestor_0_pmp_7_addr), .io_requestor_0_pmp_7_mask (_ptw_io_requestor_0_pmp_7_mask), .io_requestor_0_customCSRs_csrs_0_ren (_ptw_io_requestor_0_customCSRs_csrs_0_ren), .io_requestor_0_customCSRs_csrs_0_wen (_ptw_io_requestor_0_customCSRs_csrs_0_wen), .io_requestor_0_customCSRs_csrs_0_wdata (_ptw_io_requestor_0_customCSRs_csrs_0_wdata), .io_requestor_0_customCSRs_csrs_0_value (_ptw_io_requestor_0_customCSRs_csrs_0_value), .io_requestor_0_customCSRs_csrs_1_ren (_ptw_io_requestor_0_customCSRs_csrs_1_ren), .io_requestor_0_customCSRs_csrs_1_wen (_ptw_io_requestor_0_customCSRs_csrs_1_wen), .io_requestor_0_customCSRs_csrs_1_wdata (_ptw_io_requestor_0_customCSRs_csrs_1_wdata), .io_requestor_0_customCSRs_csrs_1_value (_ptw_io_requestor_0_customCSRs_csrs_1_value), .io_requestor_0_customCSRs_csrs_2_ren (_ptw_io_requestor_0_customCSRs_csrs_2_ren), .io_requestor_0_customCSRs_csrs_2_wen 
(_ptw_io_requestor_0_customCSRs_csrs_2_wen), .io_requestor_0_customCSRs_csrs_2_wdata (_ptw_io_requestor_0_customCSRs_csrs_2_wdata), .io_requestor_0_customCSRs_csrs_2_value (_ptw_io_requestor_0_customCSRs_csrs_2_value), .io_requestor_0_customCSRs_csrs_3_ren (_ptw_io_requestor_0_customCSRs_csrs_3_ren), .io_requestor_0_customCSRs_csrs_3_wen (_ptw_io_requestor_0_customCSRs_csrs_3_wen), .io_requestor_0_customCSRs_csrs_3_wdata (_ptw_io_requestor_0_customCSRs_csrs_3_wdata), .io_requestor_0_customCSRs_csrs_3_value (_ptw_io_requestor_0_customCSRs_csrs_3_value), .io_requestor_1_req_ready (_ptw_io_requestor_1_req_ready), .io_requestor_1_resp_valid (_ptw_io_requestor_1_resp_valid), .io_requestor_1_resp_bits_ae_ptw (_ptw_io_requestor_1_resp_bits_ae_ptw), .io_requestor_1_resp_bits_ae_final (_ptw_io_requestor_1_resp_bits_ae_final), .io_requestor_1_resp_bits_pf (_ptw_io_requestor_1_resp_bits_pf), .io_requestor_1_resp_bits_gf (_ptw_io_requestor_1_resp_bits_gf), .io_requestor_1_resp_bits_hr (_ptw_io_requestor_1_resp_bits_hr), .io_requestor_1_resp_bits_hw (_ptw_io_requestor_1_resp_bits_hw), .io_requestor_1_resp_bits_hx (_ptw_io_requestor_1_resp_bits_hx), .io_requestor_1_resp_bits_pte_reserved_for_future (_ptw_io_requestor_1_resp_bits_pte_reserved_for_future), .io_requestor_1_resp_bits_pte_ppn (_ptw_io_requestor_1_resp_bits_pte_ppn), .io_requestor_1_resp_bits_pte_reserved_for_software (_ptw_io_requestor_1_resp_bits_pte_reserved_for_software), .io_requestor_1_resp_bits_pte_d (_ptw_io_requestor_1_resp_bits_pte_d), .io_requestor_1_resp_bits_pte_a (_ptw_io_requestor_1_resp_bits_pte_a), .io_requestor_1_resp_bits_pte_g (_ptw_io_requestor_1_resp_bits_pte_g), .io_requestor_1_resp_bits_pte_u (_ptw_io_requestor_1_resp_bits_pte_u), .io_requestor_1_resp_bits_pte_x (_ptw_io_requestor_1_resp_bits_pte_x), .io_requestor_1_resp_bits_pte_w (_ptw_io_requestor_1_resp_bits_pte_w), .io_requestor_1_resp_bits_pte_r (_ptw_io_requestor_1_resp_bits_pte_r), .io_requestor_1_resp_bits_pte_v (_ptw_io_requestor_1_resp_bits_pte_v), .io_requestor_1_resp_bits_level (_ptw_io_requestor_1_resp_bits_level), .io_requestor_1_resp_bits_homogeneous (_ptw_io_requestor_1_resp_bits_homogeneous), .io_requestor_1_resp_bits_gpa_valid (_ptw_io_requestor_1_resp_bits_gpa_valid), .io_requestor_1_resp_bits_gpa_bits (_ptw_io_requestor_1_resp_bits_gpa_bits), .io_requestor_1_resp_bits_gpa_is_pte (_ptw_io_requestor_1_resp_bits_gpa_is_pte), .io_requestor_1_ptbr_mode (_ptw_io_requestor_1_ptbr_mode), .io_requestor_1_ptbr_ppn (_ptw_io_requestor_1_ptbr_ppn), .io_requestor_1_status_debug (_ptw_io_requestor_1_status_debug), .io_requestor_1_status_cease (_ptw_io_requestor_1_status_cease), .io_requestor_1_status_wfi (_ptw_io_requestor_1_status_wfi), .io_requestor_1_status_isa (_ptw_io_requestor_1_status_isa), .io_requestor_1_status_dprv (_ptw_io_requestor_1_status_dprv), .io_requestor_1_status_dv (_ptw_io_requestor_1_status_dv), .io_requestor_1_status_prv (_ptw_io_requestor_1_status_prv), .io_requestor_1_status_v (_ptw_io_requestor_1_status_v), .io_requestor_1_status_mpv (_ptw_io_requestor_1_status_mpv), .io_requestor_1_status_gva (_ptw_io_requestor_1_status_gva), .io_requestor_1_status_tsr (_ptw_io_requestor_1_status_tsr), .io_requestor_1_status_tw (_ptw_io_requestor_1_status_tw), .io_requestor_1_status_tvm (_ptw_io_requestor_1_status_tvm), .io_requestor_1_status_mxr (_ptw_io_requestor_1_status_mxr), .io_requestor_1_status_sum (_ptw_io_requestor_1_status_sum), .io_requestor_1_status_mprv (_ptw_io_requestor_1_status_mprv), .io_requestor_1_status_fs 
(_ptw_io_requestor_1_status_fs), .io_requestor_1_status_mpp (_ptw_io_requestor_1_status_mpp), .io_requestor_1_status_spp (_ptw_io_requestor_1_status_spp), .io_requestor_1_status_mpie (_ptw_io_requestor_1_status_mpie), .io_requestor_1_status_spie (_ptw_io_requestor_1_status_spie), .io_requestor_1_status_mie (_ptw_io_requestor_1_status_mie), .io_requestor_1_status_sie (_ptw_io_requestor_1_status_sie), .io_requestor_1_hstatus_spvp (_ptw_io_requestor_1_hstatus_spvp), .io_requestor_1_hstatus_spv (_ptw_io_requestor_1_hstatus_spv), .io_requestor_1_hstatus_gva (_ptw_io_requestor_1_hstatus_gva), .io_requestor_1_gstatus_debug (_ptw_io_requestor_1_gstatus_debug), .io_requestor_1_gstatus_cease (_ptw_io_requestor_1_gstatus_cease), .io_requestor_1_gstatus_wfi (_ptw_io_requestor_1_gstatus_wfi), .io_requestor_1_gstatus_isa (_ptw_io_requestor_1_gstatus_isa), .io_requestor_1_gstatus_dprv (_ptw_io_requestor_1_gstatus_dprv), .io_requestor_1_gstatus_dv (_ptw_io_requestor_1_gstatus_dv), .io_requestor_1_gstatus_prv (_ptw_io_requestor_1_gstatus_prv), .io_requestor_1_gstatus_v (_ptw_io_requestor_1_gstatus_v), .io_requestor_1_gstatus_zero2 (_ptw_io_requestor_1_gstatus_zero2), .io_requestor_1_gstatus_mpv (_ptw_io_requestor_1_gstatus_mpv), .io_requestor_1_gstatus_gva (_ptw_io_requestor_1_gstatus_gva), .io_requestor_1_gstatus_mbe (_ptw_io_requestor_1_gstatus_mbe), .io_requestor_1_gstatus_sbe (_ptw_io_requestor_1_gstatus_sbe), .io_requestor_1_gstatus_sxl (_ptw_io_requestor_1_gstatus_sxl), .io_requestor_1_gstatus_zero1 (_ptw_io_requestor_1_gstatus_zero1), .io_requestor_1_gstatus_tsr (_ptw_io_requestor_1_gstatus_tsr), .io_requestor_1_gstatus_tw (_ptw_io_requestor_1_gstatus_tw), .io_requestor_1_gstatus_tvm (_ptw_io_requestor_1_gstatus_tvm), .io_requestor_1_gstatus_mxr (_ptw_io_requestor_1_gstatus_mxr), .io_requestor_1_gstatus_sum (_ptw_io_requestor_1_gstatus_sum), .io_requestor_1_gstatus_mprv (_ptw_io_requestor_1_gstatus_mprv), .io_requestor_1_gstatus_fs (_ptw_io_requestor_1_gstatus_fs), .io_requestor_1_gstatus_mpp (_ptw_io_requestor_1_gstatus_mpp), .io_requestor_1_gstatus_vs (_ptw_io_requestor_1_gstatus_vs), .io_requestor_1_gstatus_spp (_ptw_io_requestor_1_gstatus_spp), .io_requestor_1_gstatus_mpie (_ptw_io_requestor_1_gstatus_mpie), .io_requestor_1_gstatus_ube (_ptw_io_requestor_1_gstatus_ube), .io_requestor_1_gstatus_spie (_ptw_io_requestor_1_gstatus_spie), .io_requestor_1_gstatus_upie (_ptw_io_requestor_1_gstatus_upie), .io_requestor_1_gstatus_mie (_ptw_io_requestor_1_gstatus_mie), .io_requestor_1_gstatus_hie (_ptw_io_requestor_1_gstatus_hie), .io_requestor_1_gstatus_sie (_ptw_io_requestor_1_gstatus_sie), .io_requestor_1_gstatus_uie (_ptw_io_requestor_1_gstatus_uie), .io_requestor_1_pmp_0_cfg_l (_ptw_io_requestor_1_pmp_0_cfg_l), .io_requestor_1_pmp_0_cfg_a (_ptw_io_requestor_1_pmp_0_cfg_a), .io_requestor_1_pmp_0_cfg_x (_ptw_io_requestor_1_pmp_0_cfg_x), .io_requestor_1_pmp_0_cfg_w (_ptw_io_requestor_1_pmp_0_cfg_w), .io_requestor_1_pmp_0_cfg_r (_ptw_io_requestor_1_pmp_0_cfg_r), .io_requestor_1_pmp_0_addr (_ptw_io_requestor_1_pmp_0_addr), .io_requestor_1_pmp_0_mask (_ptw_io_requestor_1_pmp_0_mask), .io_requestor_1_pmp_1_cfg_l (_ptw_io_requestor_1_pmp_1_cfg_l), .io_requestor_1_pmp_1_cfg_a (_ptw_io_requestor_1_pmp_1_cfg_a), .io_requestor_1_pmp_1_cfg_x (_ptw_io_requestor_1_pmp_1_cfg_x), .io_requestor_1_pmp_1_cfg_w (_ptw_io_requestor_1_pmp_1_cfg_w), .io_requestor_1_pmp_1_cfg_r (_ptw_io_requestor_1_pmp_1_cfg_r), .io_requestor_1_pmp_1_addr (_ptw_io_requestor_1_pmp_1_addr), .io_requestor_1_pmp_1_mask 
(_ptw_io_requestor_1_pmp_1_mask), .io_requestor_1_pmp_2_cfg_l (_ptw_io_requestor_1_pmp_2_cfg_l), .io_requestor_1_pmp_2_cfg_a (_ptw_io_requestor_1_pmp_2_cfg_a), .io_requestor_1_pmp_2_cfg_x (_ptw_io_requestor_1_pmp_2_cfg_x), .io_requestor_1_pmp_2_cfg_w (_ptw_io_requestor_1_pmp_2_cfg_w), .io_requestor_1_pmp_2_cfg_r (_ptw_io_requestor_1_pmp_2_cfg_r), .io_requestor_1_pmp_2_addr (_ptw_io_requestor_1_pmp_2_addr), .io_requestor_1_pmp_2_mask (_ptw_io_requestor_1_pmp_2_mask), .io_requestor_1_pmp_3_cfg_l (_ptw_io_requestor_1_pmp_3_cfg_l), .io_requestor_1_pmp_3_cfg_a (_ptw_io_requestor_1_pmp_3_cfg_a), .io_requestor_1_pmp_3_cfg_x (_ptw_io_requestor_1_pmp_3_cfg_x), .io_requestor_1_pmp_3_cfg_w (_ptw_io_requestor_1_pmp_3_cfg_w), .io_requestor_1_pmp_3_cfg_r (_ptw_io_requestor_1_pmp_3_cfg_r), .io_requestor_1_pmp_3_addr (_ptw_io_requestor_1_pmp_3_addr), .io_requestor_1_pmp_3_mask (_ptw_io_requestor_1_pmp_3_mask), .io_requestor_1_pmp_4_cfg_l (_ptw_io_requestor_1_pmp_4_cfg_l), .io_requestor_1_pmp_4_cfg_a (_ptw_io_requestor_1_pmp_4_cfg_a), .io_requestor_1_pmp_4_cfg_x (_ptw_io_requestor_1_pmp_4_cfg_x), .io_requestor_1_pmp_4_cfg_w (_ptw_io_requestor_1_pmp_4_cfg_w), .io_requestor_1_pmp_4_cfg_r (_ptw_io_requestor_1_pmp_4_cfg_r), .io_requestor_1_pmp_4_addr (_ptw_io_requestor_1_pmp_4_addr), .io_requestor_1_pmp_4_mask (_ptw_io_requestor_1_pmp_4_mask), .io_requestor_1_pmp_5_cfg_l (_ptw_io_requestor_1_pmp_5_cfg_l), .io_requestor_1_pmp_5_cfg_a (_ptw_io_requestor_1_pmp_5_cfg_a), .io_requestor_1_pmp_5_cfg_x (_ptw_io_requestor_1_pmp_5_cfg_x), .io_requestor_1_pmp_5_cfg_w (_ptw_io_requestor_1_pmp_5_cfg_w), .io_requestor_1_pmp_5_cfg_r (_ptw_io_requestor_1_pmp_5_cfg_r), .io_requestor_1_pmp_5_addr (_ptw_io_requestor_1_pmp_5_addr), .io_requestor_1_pmp_5_mask (_ptw_io_requestor_1_pmp_5_mask), .io_requestor_1_pmp_6_cfg_l (_ptw_io_requestor_1_pmp_6_cfg_l), .io_requestor_1_pmp_6_cfg_a (_ptw_io_requestor_1_pmp_6_cfg_a), .io_requestor_1_pmp_6_cfg_x (_ptw_io_requestor_1_pmp_6_cfg_x), .io_requestor_1_pmp_6_cfg_w (_ptw_io_requestor_1_pmp_6_cfg_w), .io_requestor_1_pmp_6_cfg_r (_ptw_io_requestor_1_pmp_6_cfg_r), .io_requestor_1_pmp_6_addr (_ptw_io_requestor_1_pmp_6_addr), .io_requestor_1_pmp_6_mask (_ptw_io_requestor_1_pmp_6_mask), .io_requestor_1_pmp_7_cfg_l (_ptw_io_requestor_1_pmp_7_cfg_l), .io_requestor_1_pmp_7_cfg_a (_ptw_io_requestor_1_pmp_7_cfg_a), .io_requestor_1_pmp_7_cfg_x (_ptw_io_requestor_1_pmp_7_cfg_x), .io_requestor_1_pmp_7_cfg_w (_ptw_io_requestor_1_pmp_7_cfg_w), .io_requestor_1_pmp_7_cfg_r (_ptw_io_requestor_1_pmp_7_cfg_r), .io_requestor_1_pmp_7_addr (_ptw_io_requestor_1_pmp_7_addr), .io_requestor_1_pmp_7_mask (_ptw_io_requestor_1_pmp_7_mask), .io_requestor_1_customCSRs_csrs_0_ren (_ptw_io_requestor_1_customCSRs_csrs_0_ren), .io_requestor_1_customCSRs_csrs_0_wen (_ptw_io_requestor_1_customCSRs_csrs_0_wen), .io_requestor_1_customCSRs_csrs_0_wdata (_ptw_io_requestor_1_customCSRs_csrs_0_wdata), .io_requestor_1_customCSRs_csrs_0_value (_ptw_io_requestor_1_customCSRs_csrs_0_value), .io_requestor_1_customCSRs_csrs_1_ren (_ptw_io_requestor_1_customCSRs_csrs_1_ren), .io_requestor_1_customCSRs_csrs_1_wen (_ptw_io_requestor_1_customCSRs_csrs_1_wen), .io_requestor_1_customCSRs_csrs_1_wdata (_ptw_io_requestor_1_customCSRs_csrs_1_wdata), .io_requestor_1_customCSRs_csrs_1_value (_ptw_io_requestor_1_customCSRs_csrs_1_value), .io_requestor_1_customCSRs_csrs_2_ren (_ptw_io_requestor_1_customCSRs_csrs_2_ren), .io_requestor_1_customCSRs_csrs_2_wen (_ptw_io_requestor_1_customCSRs_csrs_2_wen), .io_requestor_1_customCSRs_csrs_2_wdata 
(_ptw_io_requestor_1_customCSRs_csrs_2_wdata), .io_requestor_1_customCSRs_csrs_2_value (_ptw_io_requestor_1_customCSRs_csrs_2_value), .io_requestor_1_customCSRs_csrs_3_ren (_ptw_io_requestor_1_customCSRs_csrs_3_ren), .io_requestor_1_customCSRs_csrs_3_wen (_ptw_io_requestor_1_customCSRs_csrs_3_wen), .io_requestor_1_customCSRs_csrs_3_wdata (_ptw_io_requestor_1_customCSRs_csrs_3_wdata), .io_requestor_1_customCSRs_csrs_3_value (_ptw_io_requestor_1_customCSRs_csrs_3_value), .io_requestor_2_req_ready (_ptw_io_requestor_2_req_ready), .io_requestor_2_req_valid (_dcache_io_ptw_req_valid), // @[HellaCache.scala:278:43] .io_requestor_2_req_bits_bits_addr (_dcache_io_ptw_req_bits_bits_addr), // @[HellaCache.scala:278:43] .io_requestor_2_req_bits_bits_need_gpa (_dcache_io_ptw_req_bits_bits_need_gpa), // @[HellaCache.scala:278:43] .io_requestor_2_resp_valid (_ptw_io_requestor_2_resp_valid), .io_requestor_2_resp_bits_ae_ptw (_ptw_io_requestor_2_resp_bits_ae_ptw), .io_requestor_2_resp_bits_ae_final (_ptw_io_requestor_2_resp_bits_ae_final), .io_requestor_2_resp_bits_pf (_ptw_io_requestor_2_resp_bits_pf), .io_requestor_2_resp_bits_gf (_ptw_io_requestor_2_resp_bits_gf), .io_requestor_2_resp_bits_hr (_ptw_io_requestor_2_resp_bits_hr), .io_requestor_2_resp_bits_hw (_ptw_io_requestor_2_resp_bits_hw), .io_requestor_2_resp_bits_hx (_ptw_io_requestor_2_resp_bits_hx), .io_requestor_2_resp_bits_pte_reserved_for_future (_ptw_io_requestor_2_resp_bits_pte_reserved_for_future), .io_requestor_2_resp_bits_pte_ppn (_ptw_io_requestor_2_resp_bits_pte_ppn), .io_requestor_2_resp_bits_pte_reserved_for_software (_ptw_io_requestor_2_resp_bits_pte_reserved_for_software), .io_requestor_2_resp_bits_pte_d (_ptw_io_requestor_2_resp_bits_pte_d), .io_requestor_2_resp_bits_pte_a (_ptw_io_requestor_2_resp_bits_pte_a), .io_requestor_2_resp_bits_pte_g (_ptw_io_requestor_2_resp_bits_pte_g), .io_requestor_2_resp_bits_pte_u (_ptw_io_requestor_2_resp_bits_pte_u), .io_requestor_2_resp_bits_pte_x (_ptw_io_requestor_2_resp_bits_pte_x), .io_requestor_2_resp_bits_pte_w (_ptw_io_requestor_2_resp_bits_pte_w), .io_requestor_2_resp_bits_pte_r (_ptw_io_requestor_2_resp_bits_pte_r), .io_requestor_2_resp_bits_pte_v (_ptw_io_requestor_2_resp_bits_pte_v), .io_requestor_2_resp_bits_level (_ptw_io_requestor_2_resp_bits_level), .io_requestor_2_resp_bits_homogeneous (_ptw_io_requestor_2_resp_bits_homogeneous), .io_requestor_2_resp_bits_gpa_valid (_ptw_io_requestor_2_resp_bits_gpa_valid), .io_requestor_2_resp_bits_gpa_bits (_ptw_io_requestor_2_resp_bits_gpa_bits), .io_requestor_2_resp_bits_gpa_is_pte (_ptw_io_requestor_2_resp_bits_gpa_is_pte), .io_requestor_2_ptbr_mode (_ptw_io_requestor_2_ptbr_mode), .io_requestor_2_ptbr_ppn (_ptw_io_requestor_2_ptbr_ppn), .io_requestor_2_status_debug (_ptw_io_requestor_2_status_debug), .io_requestor_2_status_cease (_ptw_io_requestor_2_status_cease), .io_requestor_2_status_wfi (_ptw_io_requestor_2_status_wfi), .io_requestor_2_status_isa (_ptw_io_requestor_2_status_isa), .io_requestor_2_status_dprv (_ptw_io_requestor_2_status_dprv), .io_requestor_2_status_dv (_ptw_io_requestor_2_status_dv), .io_requestor_2_status_prv (_ptw_io_requestor_2_status_prv), .io_requestor_2_status_v (_ptw_io_requestor_2_status_v), .io_requestor_2_status_mpv (_ptw_io_requestor_2_status_mpv), .io_requestor_2_status_gva (_ptw_io_requestor_2_status_gva), .io_requestor_2_status_tsr (_ptw_io_requestor_2_status_tsr), .io_requestor_2_status_tw (_ptw_io_requestor_2_status_tw), .io_requestor_2_status_tvm (_ptw_io_requestor_2_status_tvm), .io_requestor_2_status_mxr 
(_ptw_io_requestor_2_status_mxr), .io_requestor_2_status_sum (_ptw_io_requestor_2_status_sum), .io_requestor_2_status_mprv (_ptw_io_requestor_2_status_mprv), .io_requestor_2_status_fs (_ptw_io_requestor_2_status_fs), .io_requestor_2_status_mpp (_ptw_io_requestor_2_status_mpp), .io_requestor_2_status_spp (_ptw_io_requestor_2_status_spp), .io_requestor_2_status_mpie (_ptw_io_requestor_2_status_mpie), .io_requestor_2_status_spie (_ptw_io_requestor_2_status_spie), .io_requestor_2_status_mie (_ptw_io_requestor_2_status_mie), .io_requestor_2_status_sie (_ptw_io_requestor_2_status_sie), .io_requestor_2_hstatus_spvp (_ptw_io_requestor_2_hstatus_spvp), .io_requestor_2_hstatus_spv (_ptw_io_requestor_2_hstatus_spv), .io_requestor_2_hstatus_gva (_ptw_io_requestor_2_hstatus_gva), .io_requestor_2_gstatus_debug (_ptw_io_requestor_2_gstatus_debug), .io_requestor_2_gstatus_cease (_ptw_io_requestor_2_gstatus_cease), .io_requestor_2_gstatus_wfi (_ptw_io_requestor_2_gstatus_wfi), .io_requestor_2_gstatus_isa (_ptw_io_requestor_2_gstatus_isa), .io_requestor_2_gstatus_dprv (_ptw_io_requestor_2_gstatus_dprv), .io_requestor_2_gstatus_dv (_ptw_io_requestor_2_gstatus_dv), .io_requestor_2_gstatus_prv (_ptw_io_requestor_2_gstatus_prv), .io_requestor_2_gstatus_v (_ptw_io_requestor_2_gstatus_v), .io_requestor_2_gstatus_zero2 (_ptw_io_requestor_2_gstatus_zero2), .io_requestor_2_gstatus_mpv (_ptw_io_requestor_2_gstatus_mpv), .io_requestor_2_gstatus_gva (_ptw_io_requestor_2_gstatus_gva), .io_requestor_2_gstatus_mbe (_ptw_io_requestor_2_gstatus_mbe), .io_requestor_2_gstatus_sbe (_ptw_io_requestor_2_gstatus_sbe), .io_requestor_2_gstatus_sxl (_ptw_io_requestor_2_gstatus_sxl), .io_requestor_2_gstatus_zero1 (_ptw_io_requestor_2_gstatus_zero1), .io_requestor_2_gstatus_tsr (_ptw_io_requestor_2_gstatus_tsr), .io_requestor_2_gstatus_tw (_ptw_io_requestor_2_gstatus_tw), .io_requestor_2_gstatus_tvm (_ptw_io_requestor_2_gstatus_tvm), .io_requestor_2_gstatus_mxr (_ptw_io_requestor_2_gstatus_mxr), .io_requestor_2_gstatus_sum (_ptw_io_requestor_2_gstatus_sum), .io_requestor_2_gstatus_mprv (_ptw_io_requestor_2_gstatus_mprv), .io_requestor_2_gstatus_fs (_ptw_io_requestor_2_gstatus_fs), .io_requestor_2_gstatus_mpp (_ptw_io_requestor_2_gstatus_mpp), .io_requestor_2_gstatus_vs (_ptw_io_requestor_2_gstatus_vs), .io_requestor_2_gstatus_spp (_ptw_io_requestor_2_gstatus_spp), .io_requestor_2_gstatus_mpie (_ptw_io_requestor_2_gstatus_mpie), .io_requestor_2_gstatus_ube (_ptw_io_requestor_2_gstatus_ube), .io_requestor_2_gstatus_spie (_ptw_io_requestor_2_gstatus_spie), .io_requestor_2_gstatus_upie (_ptw_io_requestor_2_gstatus_upie), .io_requestor_2_gstatus_mie (_ptw_io_requestor_2_gstatus_mie), .io_requestor_2_gstatus_hie (_ptw_io_requestor_2_gstatus_hie), .io_requestor_2_gstatus_sie (_ptw_io_requestor_2_gstatus_sie), .io_requestor_2_gstatus_uie (_ptw_io_requestor_2_gstatus_uie), .io_requestor_2_pmp_0_cfg_l (_ptw_io_requestor_2_pmp_0_cfg_l), .io_requestor_2_pmp_0_cfg_a (_ptw_io_requestor_2_pmp_0_cfg_a), .io_requestor_2_pmp_0_cfg_x (_ptw_io_requestor_2_pmp_0_cfg_x), .io_requestor_2_pmp_0_cfg_w (_ptw_io_requestor_2_pmp_0_cfg_w), .io_requestor_2_pmp_0_cfg_r (_ptw_io_requestor_2_pmp_0_cfg_r), .io_requestor_2_pmp_0_addr (_ptw_io_requestor_2_pmp_0_addr), .io_requestor_2_pmp_0_mask (_ptw_io_requestor_2_pmp_0_mask), .io_requestor_2_pmp_1_cfg_l (_ptw_io_requestor_2_pmp_1_cfg_l), .io_requestor_2_pmp_1_cfg_a (_ptw_io_requestor_2_pmp_1_cfg_a), .io_requestor_2_pmp_1_cfg_x (_ptw_io_requestor_2_pmp_1_cfg_x), .io_requestor_2_pmp_1_cfg_w 
(_ptw_io_requestor_2_pmp_1_cfg_w), .io_requestor_2_pmp_1_cfg_r (_ptw_io_requestor_2_pmp_1_cfg_r), .io_requestor_2_pmp_1_addr (_ptw_io_requestor_2_pmp_1_addr), .io_requestor_2_pmp_1_mask (_ptw_io_requestor_2_pmp_1_mask), .io_requestor_2_pmp_2_cfg_l (_ptw_io_requestor_2_pmp_2_cfg_l), .io_requestor_2_pmp_2_cfg_a (_ptw_io_requestor_2_pmp_2_cfg_a), .io_requestor_2_pmp_2_cfg_x (_ptw_io_requestor_2_pmp_2_cfg_x), .io_requestor_2_pmp_2_cfg_w (_ptw_io_requestor_2_pmp_2_cfg_w), .io_requestor_2_pmp_2_cfg_r (_ptw_io_requestor_2_pmp_2_cfg_r), .io_requestor_2_pmp_2_addr (_ptw_io_requestor_2_pmp_2_addr), .io_requestor_2_pmp_2_mask (_ptw_io_requestor_2_pmp_2_mask), .io_requestor_2_pmp_3_cfg_l (_ptw_io_requestor_2_pmp_3_cfg_l), .io_requestor_2_pmp_3_cfg_a (_ptw_io_requestor_2_pmp_3_cfg_a), .io_requestor_2_pmp_3_cfg_x (_ptw_io_requestor_2_pmp_3_cfg_x), .io_requestor_2_pmp_3_cfg_w (_ptw_io_requestor_2_pmp_3_cfg_w), .io_requestor_2_pmp_3_cfg_r (_ptw_io_requestor_2_pmp_3_cfg_r), .io_requestor_2_pmp_3_addr (_ptw_io_requestor_2_pmp_3_addr), .io_requestor_2_pmp_3_mask (_ptw_io_requestor_2_pmp_3_mask), .io_requestor_2_pmp_4_cfg_l (_ptw_io_requestor_2_pmp_4_cfg_l), .io_requestor_2_pmp_4_cfg_a (_ptw_io_requestor_2_pmp_4_cfg_a), .io_requestor_2_pmp_4_cfg_x (_ptw_io_requestor_2_pmp_4_cfg_x), .io_requestor_2_pmp_4_cfg_w (_ptw_io_requestor_2_pmp_4_cfg_w), .io_requestor_2_pmp_4_cfg_r (_ptw_io_requestor_2_pmp_4_cfg_r), .io_requestor_2_pmp_4_addr (_ptw_io_requestor_2_pmp_4_addr), .io_requestor_2_pmp_4_mask (_ptw_io_requestor_2_pmp_4_mask), .io_requestor_2_pmp_5_cfg_l (_ptw_io_requestor_2_pmp_5_cfg_l), .io_requestor_2_pmp_5_cfg_a (_ptw_io_requestor_2_pmp_5_cfg_a), .io_requestor_2_pmp_5_cfg_x (_ptw_io_requestor_2_pmp_5_cfg_x), .io_requestor_2_pmp_5_cfg_w (_ptw_io_requestor_2_pmp_5_cfg_w), .io_requestor_2_pmp_5_cfg_r (_ptw_io_requestor_2_pmp_5_cfg_r), .io_requestor_2_pmp_5_addr (_ptw_io_requestor_2_pmp_5_addr), .io_requestor_2_pmp_5_mask (_ptw_io_requestor_2_pmp_5_mask), .io_requestor_2_pmp_6_cfg_l (_ptw_io_requestor_2_pmp_6_cfg_l), .io_requestor_2_pmp_6_cfg_a (_ptw_io_requestor_2_pmp_6_cfg_a), .io_requestor_2_pmp_6_cfg_x (_ptw_io_requestor_2_pmp_6_cfg_x), .io_requestor_2_pmp_6_cfg_w (_ptw_io_requestor_2_pmp_6_cfg_w), .io_requestor_2_pmp_6_cfg_r (_ptw_io_requestor_2_pmp_6_cfg_r), .io_requestor_2_pmp_6_addr (_ptw_io_requestor_2_pmp_6_addr), .io_requestor_2_pmp_6_mask (_ptw_io_requestor_2_pmp_6_mask), .io_requestor_2_pmp_7_cfg_l (_ptw_io_requestor_2_pmp_7_cfg_l), .io_requestor_2_pmp_7_cfg_a (_ptw_io_requestor_2_pmp_7_cfg_a), .io_requestor_2_pmp_7_cfg_x (_ptw_io_requestor_2_pmp_7_cfg_x), .io_requestor_2_pmp_7_cfg_w (_ptw_io_requestor_2_pmp_7_cfg_w), .io_requestor_2_pmp_7_cfg_r (_ptw_io_requestor_2_pmp_7_cfg_r), .io_requestor_2_pmp_7_addr (_ptw_io_requestor_2_pmp_7_addr), .io_requestor_2_pmp_7_mask (_ptw_io_requestor_2_pmp_7_mask), .io_requestor_2_customCSRs_csrs_0_ren (_ptw_io_requestor_2_customCSRs_csrs_0_ren), .io_requestor_2_customCSRs_csrs_0_wen (_ptw_io_requestor_2_customCSRs_csrs_0_wen), .io_requestor_2_customCSRs_csrs_0_wdata (_ptw_io_requestor_2_customCSRs_csrs_0_wdata), .io_requestor_2_customCSRs_csrs_0_value (_ptw_io_requestor_2_customCSRs_csrs_0_value), .io_requestor_2_customCSRs_csrs_1_ren (_ptw_io_requestor_2_customCSRs_csrs_1_ren), .io_requestor_2_customCSRs_csrs_1_wen (_ptw_io_requestor_2_customCSRs_csrs_1_wen), .io_requestor_2_customCSRs_csrs_1_wdata (_ptw_io_requestor_2_customCSRs_csrs_1_wdata), .io_requestor_2_customCSRs_csrs_1_value (_ptw_io_requestor_2_customCSRs_csrs_1_value), 
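// Note on requestor 2 of the PTW arbitration: its request inputs are driven from the
// data cache's PTW port (_dcache_io_ptw_*, see the HellaCache.scala locators above),
// while the _ptw_io_requestor_2_* nets fan the PTW's response, status, PMP and
// custom-CSR state back out to that requestor.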
.io_requestor_2_customCSRs_csrs_2_ren (_ptw_io_requestor_2_customCSRs_csrs_2_ren), .io_requestor_2_customCSRs_csrs_2_wen (_ptw_io_requestor_2_customCSRs_csrs_2_wen), .io_requestor_2_customCSRs_csrs_2_wdata (_ptw_io_requestor_2_customCSRs_csrs_2_wdata), .io_requestor_2_customCSRs_csrs_2_value (_ptw_io_requestor_2_customCSRs_csrs_2_value), .io_requestor_2_customCSRs_csrs_3_ren (_ptw_io_requestor_2_customCSRs_csrs_3_ren), .io_requestor_2_customCSRs_csrs_3_wen (_ptw_io_requestor_2_customCSRs_csrs_3_wen), .io_requestor_2_customCSRs_csrs_3_wdata (_ptw_io_requestor_2_customCSRs_csrs_3_wdata), .io_requestor_2_customCSRs_csrs_3_value (_ptw_io_requestor_2_customCSRs_csrs_3_value), .io_requestor_3_req_ready (_ptw_io_requestor_3_req_ready), .io_requestor_3_req_valid (_frontend_io_ptw_req_valid), // @[Frontend.scala:393:28] .io_requestor_3_req_bits_valid (_frontend_io_ptw_req_bits_valid), // @[Frontend.scala:393:28] .io_requestor_3_req_bits_bits_addr (_frontend_io_ptw_req_bits_bits_addr), // @[Frontend.scala:393:28] .io_requestor_3_req_bits_bits_need_gpa (_frontend_io_ptw_req_bits_bits_need_gpa), // @[Frontend.scala:393:28] .io_requestor_3_resp_valid (_ptw_io_requestor_3_resp_valid), .io_requestor_3_resp_bits_ae_ptw (_ptw_io_requestor_3_resp_bits_ae_ptw), .io_requestor_3_resp_bits_ae_final (_ptw_io_requestor_3_resp_bits_ae_final), .io_requestor_3_resp_bits_pf (_ptw_io_requestor_3_resp_bits_pf), .io_requestor_3_resp_bits_gf (_ptw_io_requestor_3_resp_bits_gf), .io_requestor_3_resp_bits_hr (_ptw_io_requestor_3_resp_bits_hr), .io_requestor_3_resp_bits_hw (_ptw_io_requestor_3_resp_bits_hw), .io_requestor_3_resp_bits_hx (_ptw_io_requestor_3_resp_bits_hx), .io_requestor_3_resp_bits_pte_reserved_for_future (_ptw_io_requestor_3_resp_bits_pte_reserved_for_future), .io_requestor_3_resp_bits_pte_ppn (_ptw_io_requestor_3_resp_bits_pte_ppn), .io_requestor_3_resp_bits_pte_reserved_for_software (_ptw_io_requestor_3_resp_bits_pte_reserved_for_software), .io_requestor_3_resp_bits_pte_d (_ptw_io_requestor_3_resp_bits_pte_d), .io_requestor_3_resp_bits_pte_a (_ptw_io_requestor_3_resp_bits_pte_a), .io_requestor_3_resp_bits_pte_g (_ptw_io_requestor_3_resp_bits_pte_g), .io_requestor_3_resp_bits_pte_u (_ptw_io_requestor_3_resp_bits_pte_u), .io_requestor_3_resp_bits_pte_x (_ptw_io_requestor_3_resp_bits_pte_x), .io_requestor_3_resp_bits_pte_w (_ptw_io_requestor_3_resp_bits_pte_w), .io_requestor_3_resp_bits_pte_r (_ptw_io_requestor_3_resp_bits_pte_r), .io_requestor_3_resp_bits_pte_v (_ptw_io_requestor_3_resp_bits_pte_v), .io_requestor_3_resp_bits_level (_ptw_io_requestor_3_resp_bits_level), .io_requestor_3_resp_bits_homogeneous (_ptw_io_requestor_3_resp_bits_homogeneous), .io_requestor_3_resp_bits_gpa_valid (_ptw_io_requestor_3_resp_bits_gpa_valid), .io_requestor_3_resp_bits_gpa_bits (_ptw_io_requestor_3_resp_bits_gpa_bits), .io_requestor_3_resp_bits_gpa_is_pte (_ptw_io_requestor_3_resp_bits_gpa_is_pte), .io_requestor_3_ptbr_mode (_ptw_io_requestor_3_ptbr_mode), .io_requestor_3_ptbr_ppn (_ptw_io_requestor_3_ptbr_ppn), .io_requestor_3_status_debug (_ptw_io_requestor_3_status_debug), .io_requestor_3_status_cease (_ptw_io_requestor_3_status_cease), .io_requestor_3_status_wfi (_ptw_io_requestor_3_status_wfi), .io_requestor_3_status_isa (_ptw_io_requestor_3_status_isa), .io_requestor_3_status_dprv (_ptw_io_requestor_3_status_dprv), .io_requestor_3_status_dv (_ptw_io_requestor_3_status_dv), .io_requestor_3_status_prv (_ptw_io_requestor_3_status_prv), .io_requestor_3_status_v (_ptw_io_requestor_3_status_v), .io_requestor_3_status_mpv 
(_ptw_io_requestor_3_status_mpv), .io_requestor_3_status_gva (_ptw_io_requestor_3_status_gva), .io_requestor_3_status_tsr (_ptw_io_requestor_3_status_tsr), .io_requestor_3_status_tw (_ptw_io_requestor_3_status_tw), .io_requestor_3_status_tvm (_ptw_io_requestor_3_status_tvm), .io_requestor_3_status_mxr (_ptw_io_requestor_3_status_mxr), .io_requestor_3_status_sum (_ptw_io_requestor_3_status_sum), .io_requestor_3_status_mprv (_ptw_io_requestor_3_status_mprv), .io_requestor_3_status_fs (_ptw_io_requestor_3_status_fs), .io_requestor_3_status_mpp (_ptw_io_requestor_3_status_mpp), .io_requestor_3_status_spp (_ptw_io_requestor_3_status_spp), .io_requestor_3_status_mpie (_ptw_io_requestor_3_status_mpie), .io_requestor_3_status_spie (_ptw_io_requestor_3_status_spie), .io_requestor_3_status_mie (_ptw_io_requestor_3_status_mie), .io_requestor_3_status_sie (_ptw_io_requestor_3_status_sie), .io_requestor_3_hstatus_spvp (_ptw_io_requestor_3_hstatus_spvp), .io_requestor_3_hstatus_spv (_ptw_io_requestor_3_hstatus_spv), .io_requestor_3_hstatus_gva (_ptw_io_requestor_3_hstatus_gva), .io_requestor_3_gstatus_debug (_ptw_io_requestor_3_gstatus_debug), .io_requestor_3_gstatus_cease (_ptw_io_requestor_3_gstatus_cease), .io_requestor_3_gstatus_wfi (_ptw_io_requestor_3_gstatus_wfi), .io_requestor_3_gstatus_isa (_ptw_io_requestor_3_gstatus_isa), .io_requestor_3_gstatus_dprv (_ptw_io_requestor_3_gstatus_dprv), .io_requestor_3_gstatus_dv (_ptw_io_requestor_3_gstatus_dv), .io_requestor_3_gstatus_prv (_ptw_io_requestor_3_gstatus_prv), .io_requestor_3_gstatus_v (_ptw_io_requestor_3_gstatus_v), .io_requestor_3_gstatus_zero2 (_ptw_io_requestor_3_gstatus_zero2), .io_requestor_3_gstatus_mpv (_ptw_io_requestor_3_gstatus_mpv), .io_requestor_3_gstatus_gva (_ptw_io_requestor_3_gstatus_gva), .io_requestor_3_gstatus_mbe (_ptw_io_requestor_3_gstatus_mbe), .io_requestor_3_gstatus_sbe (_ptw_io_requestor_3_gstatus_sbe), .io_requestor_3_gstatus_sxl (_ptw_io_requestor_3_gstatus_sxl), .io_requestor_3_gstatus_zero1 (_ptw_io_requestor_3_gstatus_zero1), .io_requestor_3_gstatus_tsr (_ptw_io_requestor_3_gstatus_tsr), .io_requestor_3_gstatus_tw (_ptw_io_requestor_3_gstatus_tw), .io_requestor_3_gstatus_tvm (_ptw_io_requestor_3_gstatus_tvm), .io_requestor_3_gstatus_mxr (_ptw_io_requestor_3_gstatus_mxr), .io_requestor_3_gstatus_sum (_ptw_io_requestor_3_gstatus_sum), .io_requestor_3_gstatus_mprv (_ptw_io_requestor_3_gstatus_mprv), .io_requestor_3_gstatus_fs (_ptw_io_requestor_3_gstatus_fs), .io_requestor_3_gstatus_mpp (_ptw_io_requestor_3_gstatus_mpp), .io_requestor_3_gstatus_vs (_ptw_io_requestor_3_gstatus_vs), .io_requestor_3_gstatus_spp (_ptw_io_requestor_3_gstatus_spp), .io_requestor_3_gstatus_mpie (_ptw_io_requestor_3_gstatus_mpie), .io_requestor_3_gstatus_ube (_ptw_io_requestor_3_gstatus_ube), .io_requestor_3_gstatus_spie (_ptw_io_requestor_3_gstatus_spie), .io_requestor_3_gstatus_upie (_ptw_io_requestor_3_gstatus_upie), .io_requestor_3_gstatus_mie (_ptw_io_requestor_3_gstatus_mie), .io_requestor_3_gstatus_hie (_ptw_io_requestor_3_gstatus_hie), .io_requestor_3_gstatus_sie (_ptw_io_requestor_3_gstatus_sie), .io_requestor_3_gstatus_uie (_ptw_io_requestor_3_gstatus_uie), .io_requestor_3_pmp_0_cfg_l (_ptw_io_requestor_3_pmp_0_cfg_l), .io_requestor_3_pmp_0_cfg_a (_ptw_io_requestor_3_pmp_0_cfg_a), .io_requestor_3_pmp_0_cfg_x (_ptw_io_requestor_3_pmp_0_cfg_x), .io_requestor_3_pmp_0_cfg_w (_ptw_io_requestor_3_pmp_0_cfg_w), .io_requestor_3_pmp_0_cfg_r (_ptw_io_requestor_3_pmp_0_cfg_r), .io_requestor_3_pmp_0_addr (_ptw_io_requestor_3_pmp_0_addr), 
.io_requestor_3_pmp_0_mask (_ptw_io_requestor_3_pmp_0_mask), .io_requestor_3_pmp_1_cfg_l (_ptw_io_requestor_3_pmp_1_cfg_l), .io_requestor_3_pmp_1_cfg_a (_ptw_io_requestor_3_pmp_1_cfg_a), .io_requestor_3_pmp_1_cfg_x (_ptw_io_requestor_3_pmp_1_cfg_x), .io_requestor_3_pmp_1_cfg_w (_ptw_io_requestor_3_pmp_1_cfg_w), .io_requestor_3_pmp_1_cfg_r (_ptw_io_requestor_3_pmp_1_cfg_r), .io_requestor_3_pmp_1_addr (_ptw_io_requestor_3_pmp_1_addr), .io_requestor_3_pmp_1_mask (_ptw_io_requestor_3_pmp_1_mask), .io_requestor_3_pmp_2_cfg_l (_ptw_io_requestor_3_pmp_2_cfg_l), .io_requestor_3_pmp_2_cfg_a (_ptw_io_requestor_3_pmp_2_cfg_a), .io_requestor_3_pmp_2_cfg_x (_ptw_io_requestor_3_pmp_2_cfg_x), .io_requestor_3_pmp_2_cfg_w (_ptw_io_requestor_3_pmp_2_cfg_w), .io_requestor_3_pmp_2_cfg_r (_ptw_io_requestor_3_pmp_2_cfg_r), .io_requestor_3_pmp_2_addr (_ptw_io_requestor_3_pmp_2_addr), .io_requestor_3_pmp_2_mask (_ptw_io_requestor_3_pmp_2_mask), .io_requestor_3_pmp_3_cfg_l (_ptw_io_requestor_3_pmp_3_cfg_l), .io_requestor_3_pmp_3_cfg_a (_ptw_io_requestor_3_pmp_3_cfg_a), .io_requestor_3_pmp_3_cfg_x (_ptw_io_requestor_3_pmp_3_cfg_x), .io_requestor_3_pmp_3_cfg_w (_ptw_io_requestor_3_pmp_3_cfg_w), .io_requestor_3_pmp_3_cfg_r (_ptw_io_requestor_3_pmp_3_cfg_r), .io_requestor_3_pmp_3_addr (_ptw_io_requestor_3_pmp_3_addr), .io_requestor_3_pmp_3_mask (_ptw_io_requestor_3_pmp_3_mask), .io_requestor_3_pmp_4_cfg_l (_ptw_io_requestor_3_pmp_4_cfg_l), .io_requestor_3_pmp_4_cfg_a (_ptw_io_requestor_3_pmp_4_cfg_a), .io_requestor_3_pmp_4_cfg_x (_ptw_io_requestor_3_pmp_4_cfg_x), .io_requestor_3_pmp_4_cfg_w (_ptw_io_requestor_3_pmp_4_cfg_w), .io_requestor_3_pmp_4_cfg_r (_ptw_io_requestor_3_pmp_4_cfg_r), .io_requestor_3_pmp_4_addr (_ptw_io_requestor_3_pmp_4_addr), .io_requestor_3_pmp_4_mask (_ptw_io_requestor_3_pmp_4_mask), .io_requestor_3_pmp_5_cfg_l (_ptw_io_requestor_3_pmp_5_cfg_l), .io_requestor_3_pmp_5_cfg_a (_ptw_io_requestor_3_pmp_5_cfg_a), .io_requestor_3_pmp_5_cfg_x (_ptw_io_requestor_3_pmp_5_cfg_x), .io_requestor_3_pmp_5_cfg_w (_ptw_io_requestor_3_pmp_5_cfg_w), .io_requestor_3_pmp_5_cfg_r (_ptw_io_requestor_3_pmp_5_cfg_r), .io_requestor_3_pmp_5_addr (_ptw_io_requestor_3_pmp_5_addr), .io_requestor_3_pmp_5_mask (_ptw_io_requestor_3_pmp_5_mask), .io_requestor_3_pmp_6_cfg_l (_ptw_io_requestor_3_pmp_6_cfg_l), .io_requestor_3_pmp_6_cfg_a (_ptw_io_requestor_3_pmp_6_cfg_a), .io_requestor_3_pmp_6_cfg_x (_ptw_io_requestor_3_pmp_6_cfg_x), .io_requestor_3_pmp_6_cfg_w (_ptw_io_requestor_3_pmp_6_cfg_w), .io_requestor_3_pmp_6_cfg_r (_ptw_io_requestor_3_pmp_6_cfg_r), .io_requestor_3_pmp_6_addr (_ptw_io_requestor_3_pmp_6_addr), .io_requestor_3_pmp_6_mask (_ptw_io_requestor_3_pmp_6_mask), .io_requestor_3_pmp_7_cfg_l (_ptw_io_requestor_3_pmp_7_cfg_l), .io_requestor_3_pmp_7_cfg_a (_ptw_io_requestor_3_pmp_7_cfg_a), .io_requestor_3_pmp_7_cfg_x (_ptw_io_requestor_3_pmp_7_cfg_x), .io_requestor_3_pmp_7_cfg_w (_ptw_io_requestor_3_pmp_7_cfg_w), .io_requestor_3_pmp_7_cfg_r (_ptw_io_requestor_3_pmp_7_cfg_r), .io_requestor_3_pmp_7_addr (_ptw_io_requestor_3_pmp_7_addr), .io_requestor_3_pmp_7_mask (_ptw_io_requestor_3_pmp_7_mask), .io_requestor_3_customCSRs_csrs_0_ren (_ptw_io_requestor_3_customCSRs_csrs_0_ren), .io_requestor_3_customCSRs_csrs_0_wen (_ptw_io_requestor_3_customCSRs_csrs_0_wen), .io_requestor_3_customCSRs_csrs_0_wdata (_ptw_io_requestor_3_customCSRs_csrs_0_wdata), .io_requestor_3_customCSRs_csrs_0_value (_ptw_io_requestor_3_customCSRs_csrs_0_value), .io_requestor_3_customCSRs_csrs_1_ren (_ptw_io_requestor_3_customCSRs_csrs_1_ren), 
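// Note on requestor 3 of the PTW arbitration: its request inputs come from the
// frontend's PTW port (_frontend_io_ptw_*, Frontend.scala locators above), and the
// _ptw_io_requestor_3_* nets return the PTW's response and CSR/PMP state to it.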
.io_requestor_3_customCSRs_csrs_1_wen (_ptw_io_requestor_3_customCSRs_csrs_1_wen), .io_requestor_3_customCSRs_csrs_1_wdata (_ptw_io_requestor_3_customCSRs_csrs_1_wdata), .io_requestor_3_customCSRs_csrs_1_value (_ptw_io_requestor_3_customCSRs_csrs_1_value), .io_requestor_3_customCSRs_csrs_2_ren (_ptw_io_requestor_3_customCSRs_csrs_2_ren), .io_requestor_3_customCSRs_csrs_2_wen (_ptw_io_requestor_3_customCSRs_csrs_2_wen), .io_requestor_3_customCSRs_csrs_2_wdata (_ptw_io_requestor_3_customCSRs_csrs_2_wdata), .io_requestor_3_customCSRs_csrs_2_value (_ptw_io_requestor_3_customCSRs_csrs_2_value), .io_requestor_3_customCSRs_csrs_3_ren (_ptw_io_requestor_3_customCSRs_csrs_3_ren), .io_requestor_3_customCSRs_csrs_3_wen (_ptw_io_requestor_3_customCSRs_csrs_3_wen), .io_requestor_3_customCSRs_csrs_3_wdata (_ptw_io_requestor_3_customCSRs_csrs_3_wdata), .io_requestor_3_customCSRs_csrs_3_value (_ptw_io_requestor_3_customCSRs_csrs_3_value), .io_mem_req_ready (_dcacheArb_io_requestor_0_req_ready), // @[HellaCache.scala:292:25] .io_mem_req_valid (_ptw_io_mem_req_valid), .io_mem_req_bits_addr (_ptw_io_mem_req_bits_addr), .io_mem_req_bits_dv (_ptw_io_mem_req_bits_dv), .io_mem_s1_kill (_ptw_io_mem_s1_kill), .io_mem_s2_nack (_dcacheArb_io_requestor_0_s2_nack), // @[HellaCache.scala:292:25] .io_mem_s2_nack_cause_raw (_dcacheArb_io_requestor_0_s2_nack_cause_raw), // @[HellaCache.scala:292:25] .io_mem_s2_uncached (_dcacheArb_io_requestor_0_s2_uncached), // @[HellaCache.scala:292:25] .io_mem_s2_paddr (_dcacheArb_io_requestor_0_s2_paddr), // @[HellaCache.scala:292:25] .io_mem_resp_valid (_dcacheArb_io_requestor_0_resp_valid), // @[HellaCache.scala:292:25] .io_mem_resp_bits_addr (_dcacheArb_io_requestor_0_resp_bits_addr), // @[HellaCache.scala:292:25] .io_mem_resp_bits_tag (_dcacheArb_io_requestor_0_resp_bits_tag), // @[HellaCache.scala:292:25] .io_mem_resp_bits_cmd (_dcacheArb_io_requestor_0_resp_bits_cmd), // @[HellaCache.scala:292:25] .io_mem_resp_bits_size (_dcacheArb_io_requestor_0_resp_bits_size), // @[HellaCache.scala:292:25] .io_mem_resp_bits_signed (_dcacheArb_io_requestor_0_resp_bits_signed), // @[HellaCache.scala:292:25] .io_mem_resp_bits_dprv (_dcacheArb_io_requestor_0_resp_bits_dprv), // @[HellaCache.scala:292:25] .io_mem_resp_bits_dv (_dcacheArb_io_requestor_0_resp_bits_dv), // @[HellaCache.scala:292:25] .io_mem_resp_bits_data (_dcacheArb_io_requestor_0_resp_bits_data), // @[HellaCache.scala:292:25] .io_mem_resp_bits_mask (_dcacheArb_io_requestor_0_resp_bits_mask), // @[HellaCache.scala:292:25] .io_mem_resp_bits_replay (_dcacheArb_io_requestor_0_resp_bits_replay), // @[HellaCache.scala:292:25] .io_mem_resp_bits_has_data (_dcacheArb_io_requestor_0_resp_bits_has_data), // @[HellaCache.scala:292:25] .io_mem_resp_bits_data_word_bypass (_dcacheArb_io_requestor_0_resp_bits_data_word_bypass), // @[HellaCache.scala:292:25] .io_mem_resp_bits_data_raw (_dcacheArb_io_requestor_0_resp_bits_data_raw), // @[HellaCache.scala:292:25] .io_mem_resp_bits_store_data (_dcacheArb_io_requestor_0_resp_bits_store_data), // @[HellaCache.scala:292:25] .io_mem_replay_next (_dcacheArb_io_requestor_0_replay_next), // @[HellaCache.scala:292:25] .io_mem_s2_xcpt_ma_ld (_dcacheArb_io_requestor_0_s2_xcpt_ma_ld), // @[HellaCache.scala:292:25] .io_mem_s2_xcpt_ma_st (_dcacheArb_io_requestor_0_s2_xcpt_ma_st), // @[HellaCache.scala:292:25] .io_mem_s2_xcpt_pf_ld (_dcacheArb_io_requestor_0_s2_xcpt_pf_ld), // @[HellaCache.scala:292:25] .io_mem_s2_xcpt_pf_st (_dcacheArb_io_requestor_0_s2_xcpt_pf_st), // @[HellaCache.scala:292:25] 
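// The io_mem_* ports are the PTW's own memory interface; they attach as requestor 0
// of the HellaCache arbiter (_dcacheArb_io_requestor_0_*, HellaCache.scala:292), so
// page-table walk accesses go through the same data cache as the other requestors.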
.io_mem_s2_xcpt_ae_ld (_dcacheArb_io_requestor_0_s2_xcpt_ae_ld), // @[HellaCache.scala:292:25] .io_mem_s2_xcpt_ae_st (_dcacheArb_io_requestor_0_s2_xcpt_ae_st), // @[HellaCache.scala:292:25] .io_mem_s2_gpa (_dcacheArb_io_requestor_0_s2_gpa), // @[HellaCache.scala:292:25] .io_mem_ordered (_dcacheArb_io_requestor_0_ordered), // @[HellaCache.scala:292:25] .io_mem_store_pending (_dcacheArb_io_requestor_0_store_pending), // @[HellaCache.scala:292:25] .io_mem_perf_acquire (_dcacheArb_io_requestor_0_perf_acquire), // @[HellaCache.scala:292:25] .io_mem_perf_release (_dcacheArb_io_requestor_0_perf_release), // @[HellaCache.scala:292:25] .io_mem_perf_grant (_dcacheArb_io_requestor_0_perf_grant), // @[HellaCache.scala:292:25] .io_mem_perf_tlbMiss (_dcacheArb_io_requestor_0_perf_tlbMiss), // @[HellaCache.scala:292:25] .io_mem_perf_blocked (_dcacheArb_io_requestor_0_perf_blocked), // @[HellaCache.scala:292:25] .io_mem_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad), // @[HellaCache.scala:292:25] .io_mem_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW), // @[HellaCache.scala:292:25] .io_mem_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad), // @[HellaCache.scala:292:25] .io_mem_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad), // @[HellaCache.scala:292:25] .io_mem_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore), // @[HellaCache.scala:292:25] .io_dpath_ptbr_mode (_core_io_ptw_ptbr_mode), // @[RocketTile.scala:147:20] .io_dpath_ptbr_ppn (_core_io_ptw_ptbr_ppn), // @[RocketTile.scala:147:20] .io_dpath_sfence_valid (_core_io_ptw_sfence_valid), // @[RocketTile.scala:147:20] .io_dpath_sfence_bits_rs1 (_core_io_ptw_sfence_bits_rs1), // @[RocketTile.scala:147:20] .io_dpath_sfence_bits_rs2 (_core_io_ptw_sfence_bits_rs2), // @[RocketTile.scala:147:20] .io_dpath_sfence_bits_addr (_core_io_ptw_sfence_bits_addr), // @[RocketTile.scala:147:20] .io_dpath_sfence_bits_asid (_core_io_ptw_sfence_bits_asid), // @[RocketTile.scala:147:20] .io_dpath_sfence_bits_hv (_core_io_ptw_sfence_bits_hv), // @[RocketTile.scala:147:20] .io_dpath_sfence_bits_hg (_core_io_ptw_sfence_bits_hg), // @[RocketTile.scala:147:20] .io_dpath_status_debug (_core_io_ptw_status_debug), // @[RocketTile.scala:147:20] .io_dpath_status_cease (_core_io_ptw_status_cease), // @[RocketTile.scala:147:20] .io_dpath_status_wfi (_core_io_ptw_status_wfi), // @[RocketTile.scala:147:20] .io_dpath_status_isa (_core_io_ptw_status_isa), // @[RocketTile.scala:147:20] .io_dpath_status_dprv (_core_io_ptw_status_dprv), // @[RocketTile.scala:147:20] .io_dpath_status_dv (_core_io_ptw_status_dv), // @[RocketTile.scala:147:20] .io_dpath_status_prv (_core_io_ptw_status_prv), // @[RocketTile.scala:147:20] .io_dpath_status_v (_core_io_ptw_status_v), // @[RocketTile.scala:147:20] .io_dpath_status_mpv (_core_io_ptw_status_mpv), // @[RocketTile.scala:147:20] .io_dpath_status_gva (_core_io_ptw_status_gva), // @[RocketTile.scala:147:20] .io_dpath_status_tsr (_core_io_ptw_status_tsr), // @[RocketTile.scala:147:20] .io_dpath_status_tw (_core_io_ptw_status_tw), // @[RocketTile.scala:147:20] .io_dpath_status_tvm (_core_io_ptw_status_tvm), // @[RocketTile.scala:147:20] .io_dpath_status_mxr (_core_io_ptw_status_mxr), // @[RocketTile.scala:147:20] .io_dpath_status_sum (_core_io_ptw_status_sum), // @[RocketTile.scala:147:20] .io_dpath_status_mprv (_core_io_ptw_status_mprv), // @[RocketTile.scala:147:20] 
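// The io_dpath_* ports mirror the core's CSR/datapath state (ptbr, sfence, status,
// hstatus, gstatus, PMPs, custom CSRs) into the PTW: the inputs are driven from the
// core's _core_io_ptw_* outputs (RocketTile.scala:147), and the _ptw_io_dpath_* nets
// (perf counters, clock_enabled) return information to the core.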
.io_dpath_status_fs (_core_io_ptw_status_fs), // @[RocketTile.scala:147:20] .io_dpath_status_mpp (_core_io_ptw_status_mpp), // @[RocketTile.scala:147:20] .io_dpath_status_spp (_core_io_ptw_status_spp), // @[RocketTile.scala:147:20] .io_dpath_status_mpie (_core_io_ptw_status_mpie), // @[RocketTile.scala:147:20] .io_dpath_status_spie (_core_io_ptw_status_spie), // @[RocketTile.scala:147:20] .io_dpath_status_mie (_core_io_ptw_status_mie), // @[RocketTile.scala:147:20] .io_dpath_status_sie (_core_io_ptw_status_sie), // @[RocketTile.scala:147:20] .io_dpath_hstatus_spvp (_core_io_ptw_hstatus_spvp), // @[RocketTile.scala:147:20] .io_dpath_hstatus_spv (_core_io_ptw_hstatus_spv), // @[RocketTile.scala:147:20] .io_dpath_hstatus_gva (_core_io_ptw_hstatus_gva), // @[RocketTile.scala:147:20] .io_dpath_gstatus_debug (_core_io_ptw_gstatus_debug), // @[RocketTile.scala:147:20] .io_dpath_gstatus_cease (_core_io_ptw_gstatus_cease), // @[RocketTile.scala:147:20] .io_dpath_gstatus_wfi (_core_io_ptw_gstatus_wfi), // @[RocketTile.scala:147:20] .io_dpath_gstatus_isa (_core_io_ptw_gstatus_isa), // @[RocketTile.scala:147:20] .io_dpath_gstatus_dprv (_core_io_ptw_gstatus_dprv), // @[RocketTile.scala:147:20] .io_dpath_gstatus_dv (_core_io_ptw_gstatus_dv), // @[RocketTile.scala:147:20] .io_dpath_gstatus_prv (_core_io_ptw_gstatus_prv), // @[RocketTile.scala:147:20] .io_dpath_gstatus_v (_core_io_ptw_gstatus_v), // @[RocketTile.scala:147:20] .io_dpath_gstatus_zero2 (_core_io_ptw_gstatus_zero2), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mpv (_core_io_ptw_gstatus_mpv), // @[RocketTile.scala:147:20] .io_dpath_gstatus_gva (_core_io_ptw_gstatus_gva), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mbe (_core_io_ptw_gstatus_mbe), // @[RocketTile.scala:147:20] .io_dpath_gstatus_sbe (_core_io_ptw_gstatus_sbe), // @[RocketTile.scala:147:20] .io_dpath_gstatus_sxl (_core_io_ptw_gstatus_sxl), // @[RocketTile.scala:147:20] .io_dpath_gstatus_zero1 (_core_io_ptw_gstatus_zero1), // @[RocketTile.scala:147:20] .io_dpath_gstatus_tsr (_core_io_ptw_gstatus_tsr), // @[RocketTile.scala:147:20] .io_dpath_gstatus_tw (_core_io_ptw_gstatus_tw), // @[RocketTile.scala:147:20] .io_dpath_gstatus_tvm (_core_io_ptw_gstatus_tvm), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mxr (_core_io_ptw_gstatus_mxr), // @[RocketTile.scala:147:20] .io_dpath_gstatus_sum (_core_io_ptw_gstatus_sum), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mprv (_core_io_ptw_gstatus_mprv), // @[RocketTile.scala:147:20] .io_dpath_gstatus_fs (_core_io_ptw_gstatus_fs), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mpp (_core_io_ptw_gstatus_mpp), // @[RocketTile.scala:147:20] .io_dpath_gstatus_vs (_core_io_ptw_gstatus_vs), // @[RocketTile.scala:147:20] .io_dpath_gstatus_spp (_core_io_ptw_gstatus_spp), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mpie (_core_io_ptw_gstatus_mpie), // @[RocketTile.scala:147:20] .io_dpath_gstatus_ube (_core_io_ptw_gstatus_ube), // @[RocketTile.scala:147:20] .io_dpath_gstatus_spie (_core_io_ptw_gstatus_spie), // @[RocketTile.scala:147:20] .io_dpath_gstatus_upie (_core_io_ptw_gstatus_upie), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mie (_core_io_ptw_gstatus_mie), // @[RocketTile.scala:147:20] .io_dpath_gstatus_hie (_core_io_ptw_gstatus_hie), // @[RocketTile.scala:147:20] .io_dpath_gstatus_sie (_core_io_ptw_gstatus_sie), // @[RocketTile.scala:147:20] .io_dpath_gstatus_uie (_core_io_ptw_gstatus_uie), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_cfg_l (_core_io_ptw_pmp_0_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_cfg_a 
(_core_io_ptw_pmp_0_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_cfg_x (_core_io_ptw_pmp_0_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_cfg_w (_core_io_ptw_pmp_0_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_cfg_r (_core_io_ptw_pmp_0_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_addr (_core_io_ptw_pmp_0_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_mask (_core_io_ptw_pmp_0_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_cfg_l (_core_io_ptw_pmp_1_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_cfg_a (_core_io_ptw_pmp_1_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_cfg_x (_core_io_ptw_pmp_1_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_cfg_w (_core_io_ptw_pmp_1_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_cfg_r (_core_io_ptw_pmp_1_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_addr (_core_io_ptw_pmp_1_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_mask (_core_io_ptw_pmp_1_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_cfg_l (_core_io_ptw_pmp_2_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_cfg_a (_core_io_ptw_pmp_2_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_cfg_x (_core_io_ptw_pmp_2_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_cfg_w (_core_io_ptw_pmp_2_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_cfg_r (_core_io_ptw_pmp_2_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_addr (_core_io_ptw_pmp_2_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_mask (_core_io_ptw_pmp_2_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_cfg_l (_core_io_ptw_pmp_3_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_cfg_a (_core_io_ptw_pmp_3_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_cfg_x (_core_io_ptw_pmp_3_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_cfg_w (_core_io_ptw_pmp_3_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_cfg_r (_core_io_ptw_pmp_3_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_addr (_core_io_ptw_pmp_3_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_mask (_core_io_ptw_pmp_3_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_cfg_l (_core_io_ptw_pmp_4_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_cfg_a (_core_io_ptw_pmp_4_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_cfg_x (_core_io_ptw_pmp_4_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_cfg_w (_core_io_ptw_pmp_4_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_cfg_r (_core_io_ptw_pmp_4_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_addr (_core_io_ptw_pmp_4_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_mask (_core_io_ptw_pmp_4_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_cfg_l (_core_io_ptw_pmp_5_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_cfg_a (_core_io_ptw_pmp_5_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_cfg_x (_core_io_ptw_pmp_5_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_cfg_w (_core_io_ptw_pmp_5_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_cfg_r (_core_io_ptw_pmp_5_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_addr (_core_io_ptw_pmp_5_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_mask (_core_io_ptw_pmp_5_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_cfg_l (_core_io_ptw_pmp_6_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_cfg_a (_core_io_ptw_pmp_6_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_cfg_x (_core_io_ptw_pmp_6_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_cfg_w (_core_io_ptw_pmp_6_cfg_w), // @[RocketTile.scala:147:20] 
.io_dpath_pmp_6_cfg_r (_core_io_ptw_pmp_6_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_addr (_core_io_ptw_pmp_6_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_mask (_core_io_ptw_pmp_6_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_cfg_l (_core_io_ptw_pmp_7_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_cfg_a (_core_io_ptw_pmp_7_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_cfg_x (_core_io_ptw_pmp_7_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_cfg_w (_core_io_ptw_pmp_7_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_cfg_r (_core_io_ptw_pmp_7_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_addr (_core_io_ptw_pmp_7_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_mask (_core_io_ptw_pmp_7_mask), // @[RocketTile.scala:147:20] .io_dpath_perf_pte_miss (_ptw_io_dpath_perf_pte_miss), .io_dpath_perf_pte_hit (_ptw_io_dpath_perf_pte_hit), .io_dpath_customCSRs_csrs_0_ren (_core_io_ptw_customCSRs_csrs_0_ren), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_0_wen (_core_io_ptw_customCSRs_csrs_0_wen), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_0_wdata (_core_io_ptw_customCSRs_csrs_0_wdata), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_0_value (_core_io_ptw_customCSRs_csrs_0_value), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_1_ren (_core_io_ptw_customCSRs_csrs_1_ren), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_1_wen (_core_io_ptw_customCSRs_csrs_1_wen), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_1_wdata (_core_io_ptw_customCSRs_csrs_1_wdata), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_1_value (_core_io_ptw_customCSRs_csrs_1_value), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_2_ren (_core_io_ptw_customCSRs_csrs_2_ren), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_2_wen (_core_io_ptw_customCSRs_csrs_2_wen), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_2_wdata (_core_io_ptw_customCSRs_csrs_2_wdata), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_2_value (_core_io_ptw_customCSRs_csrs_2_value), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_3_ren (_core_io_ptw_customCSRs_csrs_3_ren), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_3_wen (_core_io_ptw_customCSRs_csrs_3_wen), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_3_wdata (_core_io_ptw_customCSRs_csrs_3_wdata), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_3_value (_core_io_ptw_customCSRs_csrs_3_value), // @[RocketTile.scala:147:20] .io_dpath_clock_enabled (_ptw_io_dpath_clock_enabled) ); // @[PTW.scala:802:19] RRArbiter respArb ( // @[LazyRoCC.scala:101:25] .clock (clock), .reset (reset), .io_in_0_ready (_respArb_io_in_0_ready), .io_in_0_valid (_respArb_io_in_0_q_io_deq_valid), // @[Decoupled.scala:362:21] .io_in_0_bits_rd (_respArb_io_in_0_q_io_deq_bits_rd), // @[Decoupled.scala:362:21] .io_in_0_bits_data (_respArb_io_in_0_q_io_deq_bits_data), // @[Decoupled.scala:362:21] .io_out_ready (_core_io_rocc_resp_ready), // @[RocketTile.scala:147:20] .io_out_valid (_respArb_io_out_valid), .io_out_bits_rd (_respArb_io_out_bits_rd), .io_out_bits_data (_respArb_io_out_bits_data) ); // @[LazyRoCC.scala:101:25] RoccCommandRouter cmdRouter ( // @[LazyRoCC.scala:102:27] .clock (clock), .reset (reset), .io_in_ready (_cmdRouter_io_in_ready), .io_in_valid (_core_io_rocc_cmd_valid), // @[RocketTile.scala:147:20] .io_in_bits_inst_funct (_core_io_rocc_cmd_bits_inst_funct), // @[RocketTile.scala:147:20] .io_in_bits_inst_rs2 
(_core_io_rocc_cmd_bits_inst_rs2), // @[RocketTile.scala:147:20] .io_in_bits_inst_rs1 (_core_io_rocc_cmd_bits_inst_rs1), // @[RocketTile.scala:147:20] .io_in_bits_inst_xd (_core_io_rocc_cmd_bits_inst_xd), // @[RocketTile.scala:147:20] .io_in_bits_inst_xs1 (_core_io_rocc_cmd_bits_inst_xs1), // @[RocketTile.scala:147:20] .io_in_bits_inst_xs2 (_core_io_rocc_cmd_bits_inst_xs2), // @[RocketTile.scala:147:20] .io_in_bits_inst_rd (_core_io_rocc_cmd_bits_inst_rd), // @[RocketTile.scala:147:20] .io_in_bits_inst_opcode (_core_io_rocc_cmd_bits_inst_opcode), // @[RocketTile.scala:147:20] .io_in_bits_rs1 (_core_io_rocc_cmd_bits_rs1), // @[RocketTile.scala:147:20] .io_in_bits_rs2 (_core_io_rocc_cmd_bits_rs2), // @[RocketTile.scala:147:20] .io_in_bits_status_debug (_core_io_rocc_cmd_bits_status_debug), // @[RocketTile.scala:147:20] .io_in_bits_status_cease (_core_io_rocc_cmd_bits_status_cease), // @[RocketTile.scala:147:20] .io_in_bits_status_wfi (_core_io_rocc_cmd_bits_status_wfi), // @[RocketTile.scala:147:20] .io_in_bits_status_isa (_core_io_rocc_cmd_bits_status_isa), // @[RocketTile.scala:147:20] .io_in_bits_status_dprv (_core_io_rocc_cmd_bits_status_dprv), // @[RocketTile.scala:147:20] .io_in_bits_status_dv (_core_io_rocc_cmd_bits_status_dv), // @[RocketTile.scala:147:20] .io_in_bits_status_prv (_core_io_rocc_cmd_bits_status_prv), // @[RocketTile.scala:147:20] .io_in_bits_status_v (_core_io_rocc_cmd_bits_status_v), // @[RocketTile.scala:147:20] .io_in_bits_status_mpv (_core_io_rocc_cmd_bits_status_mpv), // @[RocketTile.scala:147:20] .io_in_bits_status_gva (_core_io_rocc_cmd_bits_status_gva), // @[RocketTile.scala:147:20] .io_in_bits_status_tsr (_core_io_rocc_cmd_bits_status_tsr), // @[RocketTile.scala:147:20] .io_in_bits_status_tw (_core_io_rocc_cmd_bits_status_tw), // @[RocketTile.scala:147:20] .io_in_bits_status_tvm (_core_io_rocc_cmd_bits_status_tvm), // @[RocketTile.scala:147:20] .io_in_bits_status_mxr (_core_io_rocc_cmd_bits_status_mxr), // @[RocketTile.scala:147:20] .io_in_bits_status_sum (_core_io_rocc_cmd_bits_status_sum), // @[RocketTile.scala:147:20] .io_in_bits_status_mprv (_core_io_rocc_cmd_bits_status_mprv), // @[RocketTile.scala:147:20] .io_in_bits_status_fs (_core_io_rocc_cmd_bits_status_fs), // @[RocketTile.scala:147:20] .io_in_bits_status_mpp (_core_io_rocc_cmd_bits_status_mpp), // @[RocketTile.scala:147:20] .io_in_bits_status_spp (_core_io_rocc_cmd_bits_status_spp), // @[RocketTile.scala:147:20] .io_in_bits_status_mpie (_core_io_rocc_cmd_bits_status_mpie), // @[RocketTile.scala:147:20] .io_in_bits_status_spie (_core_io_rocc_cmd_bits_status_spie), // @[RocketTile.scala:147:20] .io_in_bits_status_mie (_core_io_rocc_cmd_bits_status_mie), // @[RocketTile.scala:147:20] .io_in_bits_status_sie (_core_io_rocc_cmd_bits_status_sie), // @[RocketTile.scala:147:20] .io_out_0_ready (_rerocc_client_io_cmd_ready), // @[Configs.scala:14:35] .io_out_0_valid (_cmdRouter_io_out_0_valid), .io_out_0_bits_inst_funct (_cmdRouter_io_out_0_bits_inst_funct), .io_out_0_bits_inst_rs2 (_cmdRouter_io_out_0_bits_inst_rs2), .io_out_0_bits_inst_rs1 (_cmdRouter_io_out_0_bits_inst_rs1), .io_out_0_bits_inst_xd (_cmdRouter_io_out_0_bits_inst_xd), .io_out_0_bits_inst_xs1 (_cmdRouter_io_out_0_bits_inst_xs1), .io_out_0_bits_inst_xs2 (_cmdRouter_io_out_0_bits_inst_xs2), .io_out_0_bits_inst_rd (_cmdRouter_io_out_0_bits_inst_rd), .io_out_0_bits_inst_opcode (_cmdRouter_io_out_0_bits_inst_opcode), .io_out_0_bits_rs1 (_cmdRouter_io_out_0_bits_rs1), .io_out_0_bits_rs2 (_cmdRouter_io_out_0_bits_rs2), .io_out_0_bits_status_debug 
(_cmdRouter_io_out_0_bits_status_debug), .io_out_0_bits_status_cease (_cmdRouter_io_out_0_bits_status_cease), .io_out_0_bits_status_wfi (_cmdRouter_io_out_0_bits_status_wfi), .io_out_0_bits_status_isa (_cmdRouter_io_out_0_bits_status_isa), .io_out_0_bits_status_dprv (_cmdRouter_io_out_0_bits_status_dprv), .io_out_0_bits_status_dv (_cmdRouter_io_out_0_bits_status_dv), .io_out_0_bits_status_prv (_cmdRouter_io_out_0_bits_status_prv), .io_out_0_bits_status_v (_cmdRouter_io_out_0_bits_status_v), .io_out_0_bits_status_sd (_cmdRouter_io_out_0_bits_status_sd), .io_out_0_bits_status_zero2 (_cmdRouter_io_out_0_bits_status_zero2), .io_out_0_bits_status_mpv (_cmdRouter_io_out_0_bits_status_mpv), .io_out_0_bits_status_gva (_cmdRouter_io_out_0_bits_status_gva), .io_out_0_bits_status_mbe (_cmdRouter_io_out_0_bits_status_mbe), .io_out_0_bits_status_sbe (_cmdRouter_io_out_0_bits_status_sbe), .io_out_0_bits_status_sxl (_cmdRouter_io_out_0_bits_status_sxl), .io_out_0_bits_status_uxl (_cmdRouter_io_out_0_bits_status_uxl), .io_out_0_bits_status_sd_rv32 (_cmdRouter_io_out_0_bits_status_sd_rv32), .io_out_0_bits_status_zero1 (_cmdRouter_io_out_0_bits_status_zero1), .io_out_0_bits_status_tsr (_cmdRouter_io_out_0_bits_status_tsr), .io_out_0_bits_status_tw (_cmdRouter_io_out_0_bits_status_tw), .io_out_0_bits_status_tvm (_cmdRouter_io_out_0_bits_status_tvm), .io_out_0_bits_status_mxr (_cmdRouter_io_out_0_bits_status_mxr), .io_out_0_bits_status_sum (_cmdRouter_io_out_0_bits_status_sum), .io_out_0_bits_status_mprv (_cmdRouter_io_out_0_bits_status_mprv), .io_out_0_bits_status_xs (_cmdRouter_io_out_0_bits_status_xs), .io_out_0_bits_status_fs (_cmdRouter_io_out_0_bits_status_fs), .io_out_0_bits_status_mpp (_cmdRouter_io_out_0_bits_status_mpp), .io_out_0_bits_status_vs (_cmdRouter_io_out_0_bits_status_vs), .io_out_0_bits_status_spp (_cmdRouter_io_out_0_bits_status_spp), .io_out_0_bits_status_mpie (_cmdRouter_io_out_0_bits_status_mpie), .io_out_0_bits_status_ube (_cmdRouter_io_out_0_bits_status_ube), .io_out_0_bits_status_spie (_cmdRouter_io_out_0_bits_status_spie), .io_out_0_bits_status_upie (_cmdRouter_io_out_0_bits_status_upie), .io_out_0_bits_status_mie (_cmdRouter_io_out_0_bits_status_mie), .io_out_0_bits_status_hie (_cmdRouter_io_out_0_bits_status_hie), .io_out_0_bits_status_sie (_cmdRouter_io_out_0_bits_status_sie), .io_out_0_bits_status_uie (_cmdRouter_io_out_0_bits_status_uie), .io_busy (_cmdRouter_io_busy) ); // @[LazyRoCC.scala:102:27] SimpleHellaCacheIF dcIF ( // @[LazyRoCC.scala:106:24] .clock (clock), .reset (reset), .io_requestor_req_ready (_dcIF_io_requestor_req_ready), .io_requestor_resp_valid (_dcIF_io_requestor_resp_valid), .io_requestor_resp_bits_addr (_dcIF_io_requestor_resp_bits_addr), .io_requestor_resp_bits_tag (_dcIF_io_requestor_resp_bits_tag), .io_requestor_resp_bits_cmd (_dcIF_io_requestor_resp_bits_cmd), .io_requestor_resp_bits_size (_dcIF_io_requestor_resp_bits_size), .io_requestor_resp_bits_signed (_dcIF_io_requestor_resp_bits_signed), .io_requestor_resp_bits_dprv (_dcIF_io_requestor_resp_bits_dprv), .io_requestor_resp_bits_dv (_dcIF_io_requestor_resp_bits_dv), .io_requestor_resp_bits_data (_dcIF_io_requestor_resp_bits_data), .io_requestor_resp_bits_mask (_dcIF_io_requestor_resp_bits_mask), .io_requestor_resp_bits_replay (_dcIF_io_requestor_resp_bits_replay), .io_requestor_resp_bits_has_data (_dcIF_io_requestor_resp_bits_has_data), .io_requestor_resp_bits_data_word_bypass (_dcIF_io_requestor_resp_bits_data_word_bypass), .io_requestor_resp_bits_data_raw (_dcIF_io_requestor_resp_bits_data_raw), 
.io_requestor_resp_bits_store_data (_dcIF_io_requestor_resp_bits_store_data), .io_cache_req_ready (_dcacheArb_io_requestor_1_req_ready), // @[HellaCache.scala:292:25] .io_cache_req_valid (_dcIF_io_cache_req_valid), .io_cache_s1_data_data (_dcIF_io_cache_s1_data_data), .io_cache_s1_data_mask (_dcIF_io_cache_s1_data_mask), .io_cache_s2_nack (_dcacheArb_io_requestor_1_s2_nack), // @[HellaCache.scala:292:25] .io_cache_s2_nack_cause_raw (_dcacheArb_io_requestor_1_s2_nack_cause_raw), // @[HellaCache.scala:292:25] .io_cache_s2_uncached (_dcacheArb_io_requestor_1_s2_uncached), // @[HellaCache.scala:292:25] .io_cache_s2_paddr (_dcacheArb_io_requestor_1_s2_paddr), // @[HellaCache.scala:292:25] .io_cache_resp_valid (_dcacheArb_io_requestor_1_resp_valid), // @[HellaCache.scala:292:25] .io_cache_resp_bits_addr (_dcacheArb_io_requestor_1_resp_bits_addr), // @[HellaCache.scala:292:25] .io_cache_resp_bits_tag (_dcacheArb_io_requestor_1_resp_bits_tag), // @[HellaCache.scala:292:25] .io_cache_resp_bits_cmd (_dcacheArb_io_requestor_1_resp_bits_cmd), // @[HellaCache.scala:292:25] .io_cache_resp_bits_size (_dcacheArb_io_requestor_1_resp_bits_size), // @[HellaCache.scala:292:25] .io_cache_resp_bits_signed (_dcacheArb_io_requestor_1_resp_bits_signed), // @[HellaCache.scala:292:25] .io_cache_resp_bits_dprv (_dcacheArb_io_requestor_1_resp_bits_dprv), // @[HellaCache.scala:292:25] .io_cache_resp_bits_dv (_dcacheArb_io_requestor_1_resp_bits_dv), // @[HellaCache.scala:292:25] .io_cache_resp_bits_data (_dcacheArb_io_requestor_1_resp_bits_data), // @[HellaCache.scala:292:25] .io_cache_resp_bits_mask (_dcacheArb_io_requestor_1_resp_bits_mask), // @[HellaCache.scala:292:25] .io_cache_resp_bits_replay (_dcacheArb_io_requestor_1_resp_bits_replay), // @[HellaCache.scala:292:25] .io_cache_resp_bits_has_data (_dcacheArb_io_requestor_1_resp_bits_has_data), // @[HellaCache.scala:292:25] .io_cache_resp_bits_data_word_bypass (_dcacheArb_io_requestor_1_resp_bits_data_word_bypass), // @[HellaCache.scala:292:25] .io_cache_resp_bits_data_raw (_dcacheArb_io_requestor_1_resp_bits_data_raw), // @[HellaCache.scala:292:25] .io_cache_resp_bits_store_data (_dcacheArb_io_requestor_1_resp_bits_store_data), // @[HellaCache.scala:292:25] .io_cache_replay_next (_dcacheArb_io_requestor_1_replay_next), // @[HellaCache.scala:292:25] .io_cache_s2_xcpt_ma_ld (_dcacheArb_io_requestor_1_s2_xcpt_ma_ld), // @[HellaCache.scala:292:25] .io_cache_s2_xcpt_ma_st (_dcacheArb_io_requestor_1_s2_xcpt_ma_st), // @[HellaCache.scala:292:25] .io_cache_s2_xcpt_pf_ld (_dcacheArb_io_requestor_1_s2_xcpt_pf_ld), // @[HellaCache.scala:292:25] .io_cache_s2_xcpt_pf_st (_dcacheArb_io_requestor_1_s2_xcpt_pf_st), // @[HellaCache.scala:292:25] .io_cache_s2_xcpt_ae_ld (_dcacheArb_io_requestor_1_s2_xcpt_ae_ld), // @[HellaCache.scala:292:25] .io_cache_s2_xcpt_ae_st (_dcacheArb_io_requestor_1_s2_xcpt_ae_st), // @[HellaCache.scala:292:25] .io_cache_s2_gpa (_dcacheArb_io_requestor_1_s2_gpa), // @[HellaCache.scala:292:25] .io_cache_ordered (_dcacheArb_io_requestor_1_ordered), // @[HellaCache.scala:292:25] .io_cache_store_pending (_dcacheArb_io_requestor_1_store_pending), // @[HellaCache.scala:292:25] .io_cache_perf_acquire (_dcacheArb_io_requestor_1_perf_acquire), // @[HellaCache.scala:292:25] .io_cache_perf_release (_dcacheArb_io_requestor_1_perf_release), // @[HellaCache.scala:292:25] .io_cache_perf_grant (_dcacheArb_io_requestor_1_perf_grant), // @[HellaCache.scala:292:25] .io_cache_perf_tlbMiss (_dcacheArb_io_requestor_1_perf_tlbMiss), // @[HellaCache.scala:292:25] 
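// The io_cache_* side of the SimpleHellaCacheIF is requestor 1 of the HellaCache
// arbiter (_dcacheArb_io_requestor_1_*); the Rocket core's dmem port, connected in the
// core instantiation below, uses requestor 2 of the same arbiter.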
.io_cache_perf_blocked (_dcacheArb_io_requestor_1_perf_blocked), // @[HellaCache.scala:292:25] .io_cache_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad), // @[HellaCache.scala:292:25] .io_cache_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW), // @[HellaCache.scala:292:25] .io_cache_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad), // @[HellaCache.scala:292:25] .io_cache_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad), // @[HellaCache.scala:292:25] .io_cache_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore) // @[HellaCache.scala:292:25] ); // @[LazyRoCC.scala:106:24] Queue2_RoCCResponse respArb_io_in_0_q ( // @[Decoupled.scala:362:21] .clock (clock), .reset (reset), .io_enq_ready (_respArb_io_in_0_q_io_enq_ready), .io_enq_valid (_rerocc_client_io_resp_valid), // @[Configs.scala:14:35] .io_enq_bits_rd (_rerocc_client_io_resp_bits_rd), // @[Configs.scala:14:35] .io_enq_bits_data (_rerocc_client_io_resp_bits_data), // @[Configs.scala:14:35] .io_deq_ready (_respArb_io_in_0_ready), // @[LazyRoCC.scala:101:25] .io_deq_valid (_respArb_io_in_0_q_io_deq_valid), .io_deq_bits_rd (_respArb_io_in_0_q_io_deq_bits_rd), .io_deq_bits_data (_respArb_io_in_0_q_io_deq_bits_data) ); // @[Decoupled.scala:362:21] Rocket core ( // @[RocketTile.scala:147:20] .clock (clock), .reset (reset), .io_hartid (hartIdSinkNodeIn), // @[MixedNode.scala:551:17] .io_interrupts_debug (intSinkNodeIn_0), // @[MixedNode.scala:551:17] .io_interrupts_mtip (intSinkNodeIn_2), // @[MixedNode.scala:551:17] .io_interrupts_msip (intSinkNodeIn_1), // @[MixedNode.scala:551:17] .io_interrupts_meip (intSinkNodeIn_3), // @[MixedNode.scala:551:17] .io_interrupts_seip (intSinkNodeIn_4), // @[MixedNode.scala:551:17] .io_imem_might_request (_core_io_imem_might_request), .io_imem_req_valid (_core_io_imem_req_valid), .io_imem_req_bits_pc (_core_io_imem_req_bits_pc), .io_imem_req_bits_speculative (_core_io_imem_req_bits_speculative), .io_imem_sfence_valid (_core_io_imem_sfence_valid), .io_imem_sfence_bits_rs1 (_core_io_imem_sfence_bits_rs1), .io_imem_sfence_bits_rs2 (_core_io_imem_sfence_bits_rs2), .io_imem_sfence_bits_addr (_core_io_imem_sfence_bits_addr), .io_imem_sfence_bits_asid (_core_io_imem_sfence_bits_asid), .io_imem_sfence_bits_hv (_core_io_imem_sfence_bits_hv), .io_imem_sfence_bits_hg (_core_io_imem_sfence_bits_hg), .io_imem_resp_ready (_core_io_imem_resp_ready), .io_imem_resp_valid (_frontend_io_cpu_resp_valid), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_cfiType (_frontend_io_cpu_resp_bits_btb_cfiType), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_taken (_frontend_io_cpu_resp_bits_btb_taken), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_mask (_frontend_io_cpu_resp_bits_btb_mask), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_bridx (_frontend_io_cpu_resp_bits_btb_bridx), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_target (_frontend_io_cpu_resp_bits_btb_target), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_entry (_frontend_io_cpu_resp_bits_btb_entry), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_bht_history (_frontend_io_cpu_resp_bits_btb_bht_history), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_bht_value (_frontend_io_cpu_resp_bits_btb_bht_value), // @[Frontend.scala:393:28] .io_imem_resp_bits_pc (_frontend_io_cpu_resp_bits_pc), // @[Frontend.scala:393:28] .io_imem_resp_bits_data 
(_frontend_io_cpu_resp_bits_data), // @[Frontend.scala:393:28] .io_imem_resp_bits_mask (_frontend_io_cpu_resp_bits_mask), // @[Frontend.scala:393:28] .io_imem_resp_bits_xcpt_pf_inst (_frontend_io_cpu_resp_bits_xcpt_pf_inst), // @[Frontend.scala:393:28] .io_imem_resp_bits_xcpt_gf_inst (_frontend_io_cpu_resp_bits_xcpt_gf_inst), // @[Frontend.scala:393:28] .io_imem_resp_bits_xcpt_ae_inst (_frontend_io_cpu_resp_bits_xcpt_ae_inst), // @[Frontend.scala:393:28] .io_imem_resp_bits_replay (_frontend_io_cpu_resp_bits_replay), // @[Frontend.scala:393:28] .io_imem_gpa_valid (_frontend_io_cpu_gpa_valid), // @[Frontend.scala:393:28] .io_imem_gpa_bits (_frontend_io_cpu_gpa_bits), // @[Frontend.scala:393:28] .io_imem_gpa_is_pte (_frontend_io_cpu_gpa_is_pte), // @[Frontend.scala:393:28] .io_imem_btb_update_valid (_core_io_imem_btb_update_valid), .io_imem_btb_update_bits_prediction_cfiType (_core_io_imem_btb_update_bits_prediction_cfiType), .io_imem_btb_update_bits_prediction_taken (_core_io_imem_btb_update_bits_prediction_taken), .io_imem_btb_update_bits_prediction_mask (_core_io_imem_btb_update_bits_prediction_mask), .io_imem_btb_update_bits_prediction_bridx (_core_io_imem_btb_update_bits_prediction_bridx), .io_imem_btb_update_bits_prediction_target (_core_io_imem_btb_update_bits_prediction_target), .io_imem_btb_update_bits_prediction_entry (_core_io_imem_btb_update_bits_prediction_entry), .io_imem_btb_update_bits_prediction_bht_history (_core_io_imem_btb_update_bits_prediction_bht_history), .io_imem_btb_update_bits_prediction_bht_value (_core_io_imem_btb_update_bits_prediction_bht_value), .io_imem_btb_update_bits_pc (_core_io_imem_btb_update_bits_pc), .io_imem_btb_update_bits_target (_core_io_imem_btb_update_bits_target), .io_imem_btb_update_bits_isValid (_core_io_imem_btb_update_bits_isValid), .io_imem_btb_update_bits_br_pc (_core_io_imem_btb_update_bits_br_pc), .io_imem_btb_update_bits_cfiType (_core_io_imem_btb_update_bits_cfiType), .io_imem_bht_update_valid (_core_io_imem_bht_update_valid), .io_imem_bht_update_bits_prediction_history (_core_io_imem_bht_update_bits_prediction_history), .io_imem_bht_update_bits_prediction_value (_core_io_imem_bht_update_bits_prediction_value), .io_imem_bht_update_bits_pc (_core_io_imem_bht_update_bits_pc), .io_imem_bht_update_bits_branch (_core_io_imem_bht_update_bits_branch), .io_imem_bht_update_bits_taken (_core_io_imem_bht_update_bits_taken), .io_imem_bht_update_bits_mispredict (_core_io_imem_bht_update_bits_mispredict), .io_imem_flush_icache (_core_io_imem_flush_icache), .io_imem_npc (_frontend_io_cpu_npc), // @[Frontend.scala:393:28] .io_imem_perf_acquire (_frontend_io_cpu_perf_acquire), // @[Frontend.scala:393:28] .io_imem_perf_tlbMiss (_frontend_io_cpu_perf_tlbMiss), // @[Frontend.scala:393:28] .io_imem_progress (_core_io_imem_progress), .io_dmem_req_ready (_dcacheArb_io_requestor_2_req_ready), // @[HellaCache.scala:292:25] .io_dmem_req_valid (_core_io_dmem_req_valid), .io_dmem_req_bits_addr (_core_io_dmem_req_bits_addr), .io_dmem_req_bits_tag (_core_io_dmem_req_bits_tag), .io_dmem_req_bits_cmd (_core_io_dmem_req_bits_cmd), .io_dmem_req_bits_size (_core_io_dmem_req_bits_size), .io_dmem_req_bits_signed (_core_io_dmem_req_bits_signed), .io_dmem_req_bits_dprv (_core_io_dmem_req_bits_dprv), .io_dmem_req_bits_dv (_core_io_dmem_req_bits_dv), .io_dmem_req_bits_no_resp (_core_io_dmem_req_bits_no_resp), .io_dmem_s1_kill (_core_io_dmem_s1_kill), .io_dmem_s1_data_data (_core_io_dmem_s1_data_data), .io_dmem_s2_nack (_dcacheArb_io_requestor_2_s2_nack), // 
@[HellaCache.scala:292:25] .io_dmem_s2_nack_cause_raw (_dcacheArb_io_requestor_2_s2_nack_cause_raw), // @[HellaCache.scala:292:25] .io_dmem_s2_uncached (_dcacheArb_io_requestor_2_s2_uncached), // @[HellaCache.scala:292:25] .io_dmem_s2_paddr (_dcacheArb_io_requestor_2_s2_paddr), // @[HellaCache.scala:292:25] .io_dmem_resp_valid (_dcacheArb_io_requestor_2_resp_valid), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_addr (_dcacheArb_io_requestor_2_resp_bits_addr), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_tag (_dcacheArb_io_requestor_2_resp_bits_tag), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_cmd (_dcacheArb_io_requestor_2_resp_bits_cmd), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_size (_dcacheArb_io_requestor_2_resp_bits_size), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_signed (_dcacheArb_io_requestor_2_resp_bits_signed), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_dprv (_dcacheArb_io_requestor_2_resp_bits_dprv), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_dv (_dcacheArb_io_requestor_2_resp_bits_dv), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_data (_dcacheArb_io_requestor_2_resp_bits_data), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_mask (_dcacheArb_io_requestor_2_resp_bits_mask), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_replay (_dcacheArb_io_requestor_2_resp_bits_replay), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_has_data (_dcacheArb_io_requestor_2_resp_bits_has_data), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_data_word_bypass (_dcacheArb_io_requestor_2_resp_bits_data_word_bypass), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_data_raw (_dcacheArb_io_requestor_2_resp_bits_data_raw), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_store_data (_dcacheArb_io_requestor_2_resp_bits_store_data), // @[HellaCache.scala:292:25] .io_dmem_replay_next (_dcacheArb_io_requestor_2_replay_next), // @[HellaCache.scala:292:25] .io_dmem_s2_xcpt_ma_ld (_dcacheArb_io_requestor_2_s2_xcpt_ma_ld), // @[HellaCache.scala:292:25] .io_dmem_s2_xcpt_ma_st (_dcacheArb_io_requestor_2_s2_xcpt_ma_st), // @[HellaCache.scala:292:25] .io_dmem_s2_xcpt_pf_ld (_dcacheArb_io_requestor_2_s2_xcpt_pf_ld), // @[HellaCache.scala:292:25] .io_dmem_s2_xcpt_pf_st (_dcacheArb_io_requestor_2_s2_xcpt_pf_st), // @[HellaCache.scala:292:25] .io_dmem_s2_xcpt_ae_ld (_dcacheArb_io_requestor_2_s2_xcpt_ae_ld), // @[HellaCache.scala:292:25] .io_dmem_s2_xcpt_ae_st (_dcacheArb_io_requestor_2_s2_xcpt_ae_st), // @[HellaCache.scala:292:25] .io_dmem_s2_gpa (_dcacheArb_io_requestor_2_s2_gpa), // @[HellaCache.scala:292:25] .io_dmem_ordered (_dcacheArb_io_requestor_2_ordered), // @[HellaCache.scala:292:25] .io_dmem_store_pending (_dcacheArb_io_requestor_2_store_pending), // @[HellaCache.scala:292:25] .io_dmem_perf_acquire (_dcacheArb_io_requestor_2_perf_acquire), // @[HellaCache.scala:292:25] .io_dmem_perf_release (_dcacheArb_io_requestor_2_perf_release), // @[HellaCache.scala:292:25] .io_dmem_perf_grant (_dcacheArb_io_requestor_2_perf_grant), // @[HellaCache.scala:292:25] .io_dmem_perf_tlbMiss (_dcacheArb_io_requestor_2_perf_tlbMiss), // @[HellaCache.scala:292:25] .io_dmem_perf_blocked (_dcacheArb_io_requestor_2_perf_blocked), // @[HellaCache.scala:292:25] .io_dmem_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_2_perf_canAcceptStoreThenLoad), // @[HellaCache.scala:292:25] .io_dmem_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_2_perf_canAcceptStoreThenRMW), // @[HellaCache.scala:292:25] .io_dmem_perf_canAcceptLoadThenLoad 
(_dcacheArb_io_requestor_2_perf_canAcceptLoadThenLoad), // @[HellaCache.scala:292:25] .io_dmem_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_2_perf_storeBufferEmptyAfterLoad), // @[HellaCache.scala:292:25] .io_dmem_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_2_perf_storeBufferEmptyAfterStore), // @[HellaCache.scala:292:25] .io_dmem_keep_clock_enabled (_core_io_dmem_keep_clock_enabled), .io_ptw_ptbr_mode (_core_io_ptw_ptbr_mode), .io_ptw_ptbr_ppn (_core_io_ptw_ptbr_ppn), .io_ptw_sfence_valid (_core_io_ptw_sfence_valid), .io_ptw_sfence_bits_rs1 (_core_io_ptw_sfence_bits_rs1), .io_ptw_sfence_bits_rs2 (_core_io_ptw_sfence_bits_rs2), .io_ptw_sfence_bits_addr (_core_io_ptw_sfence_bits_addr), .io_ptw_sfence_bits_asid (_core_io_ptw_sfence_bits_asid), .io_ptw_sfence_bits_hv (_core_io_ptw_sfence_bits_hv), .io_ptw_sfence_bits_hg (_core_io_ptw_sfence_bits_hg), .io_ptw_status_debug (_core_io_ptw_status_debug), .io_ptw_status_cease (_core_io_ptw_status_cease), .io_ptw_status_wfi (_core_io_ptw_status_wfi), .io_ptw_status_isa (_core_io_ptw_status_isa), .io_ptw_status_dprv (_core_io_ptw_status_dprv), .io_ptw_status_dv (_core_io_ptw_status_dv), .io_ptw_status_prv (_core_io_ptw_status_prv), .io_ptw_status_v (_core_io_ptw_status_v), .io_ptw_status_mpv (_core_io_ptw_status_mpv), .io_ptw_status_gva (_core_io_ptw_status_gva), .io_ptw_status_tsr (_core_io_ptw_status_tsr), .io_ptw_status_tw (_core_io_ptw_status_tw), .io_ptw_status_tvm (_core_io_ptw_status_tvm), .io_ptw_status_mxr (_core_io_ptw_status_mxr), .io_ptw_status_sum (_core_io_ptw_status_sum), .io_ptw_status_mprv (_core_io_ptw_status_mprv), .io_ptw_status_fs (_core_io_ptw_status_fs), .io_ptw_status_mpp (_core_io_ptw_status_mpp), .io_ptw_status_spp (_core_io_ptw_status_spp), .io_ptw_status_mpie (_core_io_ptw_status_mpie), .io_ptw_status_spie (_core_io_ptw_status_spie), .io_ptw_status_mie (_core_io_ptw_status_mie), .io_ptw_status_sie (_core_io_ptw_status_sie), .io_ptw_hstatus_spvp (_core_io_ptw_hstatus_spvp), .io_ptw_hstatus_spv (_core_io_ptw_hstatus_spv), .io_ptw_hstatus_gva (_core_io_ptw_hstatus_gva), .io_ptw_gstatus_debug (_core_io_ptw_gstatus_debug), .io_ptw_gstatus_cease (_core_io_ptw_gstatus_cease), .io_ptw_gstatus_wfi (_core_io_ptw_gstatus_wfi), .io_ptw_gstatus_isa (_core_io_ptw_gstatus_isa), .io_ptw_gstatus_dprv (_core_io_ptw_gstatus_dprv), .io_ptw_gstatus_dv (_core_io_ptw_gstatus_dv), .io_ptw_gstatus_prv (_core_io_ptw_gstatus_prv), .io_ptw_gstatus_v (_core_io_ptw_gstatus_v), .io_ptw_gstatus_zero2 (_core_io_ptw_gstatus_zero2), .io_ptw_gstatus_mpv (_core_io_ptw_gstatus_mpv), .io_ptw_gstatus_gva (_core_io_ptw_gstatus_gva), .io_ptw_gstatus_mbe (_core_io_ptw_gstatus_mbe), .io_ptw_gstatus_sbe (_core_io_ptw_gstatus_sbe), .io_ptw_gstatus_sxl (_core_io_ptw_gstatus_sxl), .io_ptw_gstatus_zero1 (_core_io_ptw_gstatus_zero1), .io_ptw_gstatus_tsr (_core_io_ptw_gstatus_tsr), .io_ptw_gstatus_tw (_core_io_ptw_gstatus_tw), .io_ptw_gstatus_tvm (_core_io_ptw_gstatus_tvm), .io_ptw_gstatus_mxr (_core_io_ptw_gstatus_mxr), .io_ptw_gstatus_sum (_core_io_ptw_gstatus_sum), .io_ptw_gstatus_mprv (_core_io_ptw_gstatus_mprv), .io_ptw_gstatus_fs (_core_io_ptw_gstatus_fs), .io_ptw_gstatus_mpp (_core_io_ptw_gstatus_mpp), .io_ptw_gstatus_vs (_core_io_ptw_gstatus_vs), .io_ptw_gstatus_spp (_core_io_ptw_gstatus_spp), .io_ptw_gstatus_mpie (_core_io_ptw_gstatus_mpie), .io_ptw_gstatus_ube (_core_io_ptw_gstatus_ube), .io_ptw_gstatus_spie (_core_io_ptw_gstatus_spie), .io_ptw_gstatus_upie (_core_io_ptw_gstatus_upie), .io_ptw_gstatus_mie (_core_io_ptw_gstatus_mie), 
.io_ptw_gstatus_hie (_core_io_ptw_gstatus_hie), .io_ptw_gstatus_sie (_core_io_ptw_gstatus_sie), .io_ptw_gstatus_uie (_core_io_ptw_gstatus_uie), .io_ptw_pmp_0_cfg_l (_core_io_ptw_pmp_0_cfg_l), .io_ptw_pmp_0_cfg_a (_core_io_ptw_pmp_0_cfg_a), .io_ptw_pmp_0_cfg_x (_core_io_ptw_pmp_0_cfg_x), .io_ptw_pmp_0_cfg_w (_core_io_ptw_pmp_0_cfg_w), .io_ptw_pmp_0_cfg_r (_core_io_ptw_pmp_0_cfg_r), .io_ptw_pmp_0_addr (_core_io_ptw_pmp_0_addr), .io_ptw_pmp_0_mask (_core_io_ptw_pmp_0_mask), .io_ptw_pmp_1_cfg_l (_core_io_ptw_pmp_1_cfg_l), .io_ptw_pmp_1_cfg_a (_core_io_ptw_pmp_1_cfg_a), .io_ptw_pmp_1_cfg_x (_core_io_ptw_pmp_1_cfg_x), .io_ptw_pmp_1_cfg_w (_core_io_ptw_pmp_1_cfg_w), .io_ptw_pmp_1_cfg_r (_core_io_ptw_pmp_1_cfg_r), .io_ptw_pmp_1_addr (_core_io_ptw_pmp_1_addr), .io_ptw_pmp_1_mask (_core_io_ptw_pmp_1_mask), .io_ptw_pmp_2_cfg_l (_core_io_ptw_pmp_2_cfg_l), .io_ptw_pmp_2_cfg_a (_core_io_ptw_pmp_2_cfg_a), .io_ptw_pmp_2_cfg_x (_core_io_ptw_pmp_2_cfg_x), .io_ptw_pmp_2_cfg_w (_core_io_ptw_pmp_2_cfg_w), .io_ptw_pmp_2_cfg_r (_core_io_ptw_pmp_2_cfg_r), .io_ptw_pmp_2_addr (_core_io_ptw_pmp_2_addr), .io_ptw_pmp_2_mask (_core_io_ptw_pmp_2_mask), .io_ptw_pmp_3_cfg_l (_core_io_ptw_pmp_3_cfg_l), .io_ptw_pmp_3_cfg_a (_core_io_ptw_pmp_3_cfg_a), .io_ptw_pmp_3_cfg_x (_core_io_ptw_pmp_3_cfg_x), .io_ptw_pmp_3_cfg_w (_core_io_ptw_pmp_3_cfg_w), .io_ptw_pmp_3_cfg_r (_core_io_ptw_pmp_3_cfg_r), .io_ptw_pmp_3_addr (_core_io_ptw_pmp_3_addr), .io_ptw_pmp_3_mask (_core_io_ptw_pmp_3_mask), .io_ptw_pmp_4_cfg_l (_core_io_ptw_pmp_4_cfg_l), .io_ptw_pmp_4_cfg_a (_core_io_ptw_pmp_4_cfg_a), .io_ptw_pmp_4_cfg_x (_core_io_ptw_pmp_4_cfg_x), .io_ptw_pmp_4_cfg_w (_core_io_ptw_pmp_4_cfg_w), .io_ptw_pmp_4_cfg_r (_core_io_ptw_pmp_4_cfg_r), .io_ptw_pmp_4_addr (_core_io_ptw_pmp_4_addr), .io_ptw_pmp_4_mask (_core_io_ptw_pmp_4_mask), .io_ptw_pmp_5_cfg_l (_core_io_ptw_pmp_5_cfg_l), .io_ptw_pmp_5_cfg_a (_core_io_ptw_pmp_5_cfg_a), .io_ptw_pmp_5_cfg_x (_core_io_ptw_pmp_5_cfg_x), .io_ptw_pmp_5_cfg_w (_core_io_ptw_pmp_5_cfg_w), .io_ptw_pmp_5_cfg_r (_core_io_ptw_pmp_5_cfg_r), .io_ptw_pmp_5_addr (_core_io_ptw_pmp_5_addr), .io_ptw_pmp_5_mask (_core_io_ptw_pmp_5_mask), .io_ptw_pmp_6_cfg_l (_core_io_ptw_pmp_6_cfg_l), .io_ptw_pmp_6_cfg_a (_core_io_ptw_pmp_6_cfg_a), .io_ptw_pmp_6_cfg_x (_core_io_ptw_pmp_6_cfg_x), .io_ptw_pmp_6_cfg_w (_core_io_ptw_pmp_6_cfg_w), .io_ptw_pmp_6_cfg_r (_core_io_ptw_pmp_6_cfg_r), .io_ptw_pmp_6_addr (_core_io_ptw_pmp_6_addr), .io_ptw_pmp_6_mask (_core_io_ptw_pmp_6_mask), .io_ptw_pmp_7_cfg_l (_core_io_ptw_pmp_7_cfg_l), .io_ptw_pmp_7_cfg_a (_core_io_ptw_pmp_7_cfg_a), .io_ptw_pmp_7_cfg_x (_core_io_ptw_pmp_7_cfg_x), .io_ptw_pmp_7_cfg_w (_core_io_ptw_pmp_7_cfg_w), .io_ptw_pmp_7_cfg_r (_core_io_ptw_pmp_7_cfg_r), .io_ptw_pmp_7_addr (_core_io_ptw_pmp_7_addr), .io_ptw_pmp_7_mask (_core_io_ptw_pmp_7_mask), .io_ptw_perf_pte_miss (_ptw_io_dpath_perf_pte_miss), // @[PTW.scala:802:19] .io_ptw_perf_pte_hit (_ptw_io_dpath_perf_pte_hit), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_ren (_core_io_ptw_customCSRs_csrs_0_ren), .io_ptw_customCSRs_csrs_0_wen (_core_io_ptw_customCSRs_csrs_0_wen), .io_ptw_customCSRs_csrs_0_wdata (_core_io_ptw_customCSRs_csrs_0_wdata), .io_ptw_customCSRs_csrs_0_value (_core_io_ptw_customCSRs_csrs_0_value), .io_ptw_customCSRs_csrs_1_ren (_core_io_ptw_customCSRs_csrs_1_ren), .io_ptw_customCSRs_csrs_1_wen (_core_io_ptw_customCSRs_csrs_1_wen), .io_ptw_customCSRs_csrs_1_wdata (_core_io_ptw_customCSRs_csrs_1_wdata), .io_ptw_customCSRs_csrs_1_value (_core_io_ptw_customCSRs_csrs_1_value), .io_ptw_customCSRs_csrs_2_ren 
(_core_io_ptw_customCSRs_csrs_2_ren), .io_ptw_customCSRs_csrs_2_wen (_core_io_ptw_customCSRs_csrs_2_wen), .io_ptw_customCSRs_csrs_2_wdata (_core_io_ptw_customCSRs_csrs_2_wdata), .io_ptw_customCSRs_csrs_2_value (_core_io_ptw_customCSRs_csrs_2_value), .io_ptw_customCSRs_csrs_3_ren (_core_io_ptw_customCSRs_csrs_3_ren), .io_ptw_customCSRs_csrs_3_wen (_core_io_ptw_customCSRs_csrs_3_wen), .io_ptw_customCSRs_csrs_3_wdata (_core_io_ptw_customCSRs_csrs_3_wdata), .io_ptw_customCSRs_csrs_3_value (_core_io_ptw_customCSRs_csrs_3_value), .io_ptw_clock_enabled (_ptw_io_dpath_clock_enabled), // @[PTW.scala:802:19] .io_fpu_hartid (_core_io_fpu_hartid), .io_fpu_time (_core_io_fpu_time), .io_fpu_inst (_core_io_fpu_inst), .io_fpu_fromint_data (_core_io_fpu_fromint_data), .io_fpu_fcsr_rm (_core_io_fpu_fcsr_rm), .io_fpu_fcsr_flags_valid (_fpuOpt_io_fcsr_flags_valid), // @[RocketTile.scala:242:62] .io_fpu_fcsr_flags_bits (_fpuOpt_io_fcsr_flags_bits), // @[RocketTile.scala:242:62] .io_fpu_store_data (_fpuOpt_io_store_data), // @[RocketTile.scala:242:62] .io_fpu_toint_data (_fpuOpt_io_toint_data), // @[RocketTile.scala:242:62] .io_fpu_ll_resp_val (_core_io_fpu_ll_resp_val), .io_fpu_ll_resp_type (_core_io_fpu_ll_resp_type), .io_fpu_ll_resp_tag (_core_io_fpu_ll_resp_tag), .io_fpu_ll_resp_data (_core_io_fpu_ll_resp_data), .io_fpu_valid (_core_io_fpu_valid), .io_fpu_fcsr_rdy (_fpuOpt_io_fcsr_rdy), // @[RocketTile.scala:242:62] .io_fpu_nack_mem (_fpuOpt_io_nack_mem), // @[RocketTile.scala:242:62] .io_fpu_illegal_rm (_fpuOpt_io_illegal_rm), // @[RocketTile.scala:242:62] .io_fpu_killx (_core_io_fpu_killx), .io_fpu_killm (_core_io_fpu_killm), .io_fpu_dec_ldst (_fpuOpt_io_dec_ldst), // @[RocketTile.scala:242:62] .io_fpu_dec_wen (_fpuOpt_io_dec_wen), // @[RocketTile.scala:242:62] .io_fpu_dec_ren1 (_fpuOpt_io_dec_ren1), // @[RocketTile.scala:242:62] .io_fpu_dec_ren2 (_fpuOpt_io_dec_ren2), // @[RocketTile.scala:242:62] .io_fpu_dec_ren3 (_fpuOpt_io_dec_ren3), // @[RocketTile.scala:242:62] .io_fpu_dec_swap12 (_fpuOpt_io_dec_swap12), // @[RocketTile.scala:242:62] .io_fpu_dec_swap23 (_fpuOpt_io_dec_swap23), // @[RocketTile.scala:242:62] .io_fpu_dec_typeTagIn (_fpuOpt_io_dec_typeTagIn), // @[RocketTile.scala:242:62] .io_fpu_dec_typeTagOut (_fpuOpt_io_dec_typeTagOut), // @[RocketTile.scala:242:62] .io_fpu_dec_fromint (_fpuOpt_io_dec_fromint), // @[RocketTile.scala:242:62] .io_fpu_dec_toint (_fpuOpt_io_dec_toint), // @[RocketTile.scala:242:62] .io_fpu_dec_fastpipe (_fpuOpt_io_dec_fastpipe), // @[RocketTile.scala:242:62] .io_fpu_dec_fma (_fpuOpt_io_dec_fma), // @[RocketTile.scala:242:62] .io_fpu_dec_div (_fpuOpt_io_dec_div), // @[RocketTile.scala:242:62] .io_fpu_dec_sqrt (_fpuOpt_io_dec_sqrt), // @[RocketTile.scala:242:62] .io_fpu_dec_wflags (_fpuOpt_io_dec_wflags), // @[RocketTile.scala:242:62] .io_fpu_dec_vec (_fpuOpt_io_dec_vec), // @[RocketTile.scala:242:62] .io_fpu_sboard_set (_fpuOpt_io_sboard_set), // @[RocketTile.scala:242:62] .io_fpu_sboard_clr (_fpuOpt_io_sboard_clr), // @[RocketTile.scala:242:62] .io_fpu_sboard_clra (_fpuOpt_io_sboard_clra), // @[RocketTile.scala:242:62] .io_fpu_keep_clock_enabled (_core_io_fpu_keep_clock_enabled), .io_rocc_cmd_ready (_cmdRouter_io_in_ready), // @[LazyRoCC.scala:102:27] .io_rocc_cmd_valid (_core_io_rocc_cmd_valid), .io_rocc_cmd_bits_inst_funct (_core_io_rocc_cmd_bits_inst_funct), .io_rocc_cmd_bits_inst_rs2 (_core_io_rocc_cmd_bits_inst_rs2), .io_rocc_cmd_bits_inst_rs1 (_core_io_rocc_cmd_bits_inst_rs1), .io_rocc_cmd_bits_inst_xd (_core_io_rocc_cmd_bits_inst_xd), .io_rocc_cmd_bits_inst_xs1 
(_core_io_rocc_cmd_bits_inst_xs1), .io_rocc_cmd_bits_inst_xs2 (_core_io_rocc_cmd_bits_inst_xs2), .io_rocc_cmd_bits_inst_rd (_core_io_rocc_cmd_bits_inst_rd), .io_rocc_cmd_bits_inst_opcode (_core_io_rocc_cmd_bits_inst_opcode), .io_rocc_cmd_bits_rs1 (_core_io_rocc_cmd_bits_rs1), .io_rocc_cmd_bits_rs2 (_core_io_rocc_cmd_bits_rs2), .io_rocc_cmd_bits_status_debug (_core_io_rocc_cmd_bits_status_debug), .io_rocc_cmd_bits_status_cease (_core_io_rocc_cmd_bits_status_cease), .io_rocc_cmd_bits_status_wfi (_core_io_rocc_cmd_bits_status_wfi), .io_rocc_cmd_bits_status_isa (_core_io_rocc_cmd_bits_status_isa), .io_rocc_cmd_bits_status_dprv (_core_io_rocc_cmd_bits_status_dprv), .io_rocc_cmd_bits_status_dv (_core_io_rocc_cmd_bits_status_dv), .io_rocc_cmd_bits_status_prv (_core_io_rocc_cmd_bits_status_prv), .io_rocc_cmd_bits_status_v (_core_io_rocc_cmd_bits_status_v), .io_rocc_cmd_bits_status_mpv (_core_io_rocc_cmd_bits_status_mpv), .io_rocc_cmd_bits_status_gva (_core_io_rocc_cmd_bits_status_gva), .io_rocc_cmd_bits_status_tsr (_core_io_rocc_cmd_bits_status_tsr), .io_rocc_cmd_bits_status_tw (_core_io_rocc_cmd_bits_status_tw), .io_rocc_cmd_bits_status_tvm (_core_io_rocc_cmd_bits_status_tvm), .io_rocc_cmd_bits_status_mxr (_core_io_rocc_cmd_bits_status_mxr), .io_rocc_cmd_bits_status_sum (_core_io_rocc_cmd_bits_status_sum), .io_rocc_cmd_bits_status_mprv (_core_io_rocc_cmd_bits_status_mprv), .io_rocc_cmd_bits_status_fs (_core_io_rocc_cmd_bits_status_fs), .io_rocc_cmd_bits_status_mpp (_core_io_rocc_cmd_bits_status_mpp), .io_rocc_cmd_bits_status_spp (_core_io_rocc_cmd_bits_status_spp), .io_rocc_cmd_bits_status_mpie (_core_io_rocc_cmd_bits_status_mpie), .io_rocc_cmd_bits_status_spie (_core_io_rocc_cmd_bits_status_spie), .io_rocc_cmd_bits_status_mie (_core_io_rocc_cmd_bits_status_mie), .io_rocc_cmd_bits_status_sie (_core_io_rocc_cmd_bits_status_sie), .io_rocc_resp_ready (_core_io_rocc_resp_ready), .io_rocc_resp_valid (_respArb_io_out_valid), // @[LazyRoCC.scala:101:25] .io_rocc_resp_bits_rd (_respArb_io_out_bits_rd), // @[LazyRoCC.scala:101:25] .io_rocc_resp_bits_data (_respArb_io_out_bits_data), // @[LazyRoCC.scala:101:25] .io_rocc_busy (_core_io_rocc_busy_T), // @[RocketTile.scala:213:49] .io_rocc_exception (_core_io_rocc_exception), .io_rocc_csrs_0_ren (_core_io_rocc_csrs_0_ren), .io_rocc_csrs_0_wen (_core_io_rocc_csrs_0_wen), .io_rocc_csrs_0_wdata (_core_io_rocc_csrs_0_wdata), .io_rocc_csrs_0_value (_core_io_rocc_csrs_0_value), .io_rocc_csrs_0_sdata (_rerocc_client_io_csrs_0_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_1_ren (_core_io_rocc_csrs_1_ren), .io_rocc_csrs_1_wen (_core_io_rocc_csrs_1_wen), .io_rocc_csrs_1_wdata (_core_io_rocc_csrs_1_wdata), .io_rocc_csrs_1_value (_core_io_rocc_csrs_1_value), .io_rocc_csrs_1_sdata (_rerocc_client_io_csrs_1_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_2_ren (_core_io_rocc_csrs_2_ren), .io_rocc_csrs_2_wen (_core_io_rocc_csrs_2_wen), .io_rocc_csrs_2_wdata (_core_io_rocc_csrs_2_wdata), .io_rocc_csrs_2_value (_core_io_rocc_csrs_2_value), .io_rocc_csrs_2_sdata (_rerocc_client_io_csrs_2_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_3_ren (_core_io_rocc_csrs_3_ren), .io_rocc_csrs_3_wen (_core_io_rocc_csrs_3_wen), .io_rocc_csrs_3_wdata (_core_io_rocc_csrs_3_wdata), .io_rocc_csrs_3_value (_core_io_rocc_csrs_3_value), .io_rocc_csrs_3_sdata (_rerocc_client_io_csrs_3_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_4_ren (_core_io_rocc_csrs_4_ren), .io_rocc_csrs_4_wen (_core_io_rocc_csrs_4_wen), .io_rocc_csrs_4_wdata (_core_io_rocc_csrs_4_wdata), .io_rocc_csrs_4_value 
(_core_io_rocc_csrs_4_value), .io_rocc_csrs_5_ren (_core_io_rocc_csrs_5_ren), .io_rocc_csrs_5_wen (_core_io_rocc_csrs_5_wen), .io_rocc_csrs_5_wdata (_core_io_rocc_csrs_5_wdata), .io_rocc_csrs_5_value (_core_io_rocc_csrs_5_value), .io_rocc_csrs_5_stall (_rerocc_client_io_csrs_5_stall), // @[Configs.scala:14:35] .io_rocc_csrs_5_sdata (_rerocc_client_io_csrs_5_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_6_ren (_core_io_rocc_csrs_6_ren), .io_rocc_csrs_6_wen (_core_io_rocc_csrs_6_wen), .io_rocc_csrs_6_wdata (_core_io_rocc_csrs_6_wdata), .io_rocc_csrs_6_value (_core_io_rocc_csrs_6_value), .io_rocc_csrs_6_stall (_rerocc_client_io_csrs_6_stall), // @[Configs.scala:14:35] .io_rocc_csrs_6_sdata (_rerocc_client_io_csrs_6_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_7_ren (_core_io_rocc_csrs_7_ren), .io_rocc_csrs_7_wen (_core_io_rocc_csrs_7_wen), .io_rocc_csrs_7_wdata (_core_io_rocc_csrs_7_wdata), .io_rocc_csrs_7_value (_core_io_rocc_csrs_7_value), .io_rocc_csrs_7_stall (_rerocc_client_io_csrs_7_stall), // @[Configs.scala:14:35] .io_rocc_csrs_7_sdata (_rerocc_client_io_csrs_7_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_8_ren (_core_io_rocc_csrs_8_ren), .io_rocc_csrs_8_wen (_core_io_rocc_csrs_8_wen), .io_rocc_csrs_8_wdata (_core_io_rocc_csrs_8_wdata), .io_rocc_csrs_8_value (_core_io_rocc_csrs_8_value), .io_rocc_csrs_8_stall (_rerocc_client_io_csrs_8_stall), // @[Configs.scala:14:35] .io_rocc_csrs_8_sdata (_rerocc_client_io_csrs_8_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_9_ren (_core_io_rocc_csrs_9_ren), .io_rocc_csrs_9_wen (_core_io_rocc_csrs_9_wen), .io_rocc_csrs_9_wdata (_core_io_rocc_csrs_9_wdata), .io_rocc_csrs_9_value (_core_io_rocc_csrs_9_value), .io_rocc_csrs_9_stall (_rerocc_client_io_csrs_9_stall), // @[Configs.scala:14:35] .io_rocc_csrs_9_sdata (_rerocc_client_io_csrs_9_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_10_ren (_core_io_rocc_csrs_10_ren), .io_rocc_csrs_10_wen (_core_io_rocc_csrs_10_wen), .io_rocc_csrs_10_wdata (_core_io_rocc_csrs_10_wdata), .io_rocc_csrs_10_value (_core_io_rocc_csrs_10_value), .io_rocc_csrs_10_stall (_rerocc_client_io_csrs_10_stall), // @[Configs.scala:14:35] .io_rocc_csrs_10_sdata (_rerocc_client_io_csrs_10_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_11_ren (_core_io_rocc_csrs_11_ren), .io_rocc_csrs_11_wen (_core_io_rocc_csrs_11_wen), .io_rocc_csrs_11_wdata (_core_io_rocc_csrs_11_wdata), .io_rocc_csrs_11_value (_core_io_rocc_csrs_11_value), .io_rocc_csrs_11_stall (_rerocc_client_io_csrs_11_stall), // @[Configs.scala:14:35] .io_rocc_csrs_11_sdata (_rerocc_client_io_csrs_11_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_12_ren (_core_io_rocc_csrs_12_ren), .io_rocc_csrs_12_wen (_core_io_rocc_csrs_12_wen), .io_rocc_csrs_12_wdata (_core_io_rocc_csrs_12_wdata), .io_rocc_csrs_12_value (_core_io_rocc_csrs_12_value), .io_rocc_csrs_12_stall (_rerocc_client_io_csrs_12_stall), // @[Configs.scala:14:35] .io_rocc_csrs_12_sdata (_rerocc_client_io_csrs_12_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_13_ren (_core_io_rocc_csrs_13_ren), .io_rocc_csrs_13_wen (_core_io_rocc_csrs_13_wen), .io_rocc_csrs_13_wdata (_core_io_rocc_csrs_13_wdata), .io_rocc_csrs_13_value (_core_io_rocc_csrs_13_value), .io_rocc_csrs_13_stall (_rerocc_client_io_csrs_13_stall), // @[Configs.scala:14:35] .io_rocc_csrs_13_sdata (_rerocc_client_io_csrs_13_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_14_ren (_core_io_rocc_csrs_14_ren), .io_rocc_csrs_14_wen (_core_io_rocc_csrs_14_wen), .io_rocc_csrs_14_wdata (_core_io_rocc_csrs_14_wdata), .io_rocc_csrs_14_value 
(_core_io_rocc_csrs_14_value), .io_rocc_csrs_14_stall (_rerocc_client_io_csrs_14_stall), // @[Configs.scala:14:35] .io_rocc_csrs_14_sdata (_rerocc_client_io_csrs_14_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_15_ren (_core_io_rocc_csrs_15_ren), .io_rocc_csrs_15_wen (_core_io_rocc_csrs_15_wen), .io_rocc_csrs_15_wdata (_core_io_rocc_csrs_15_wdata), .io_rocc_csrs_15_value (_core_io_rocc_csrs_15_value), .io_rocc_csrs_15_stall (_rerocc_client_io_csrs_15_stall), // @[Configs.scala:14:35] .io_rocc_csrs_15_sdata (_rerocc_client_io_csrs_15_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_16_ren (_core_io_rocc_csrs_16_ren), .io_rocc_csrs_16_wen (_core_io_rocc_csrs_16_wen), .io_rocc_csrs_16_wdata (_core_io_rocc_csrs_16_wdata), .io_rocc_csrs_16_value (_core_io_rocc_csrs_16_value), .io_rocc_csrs_16_stall (_rerocc_client_io_csrs_16_stall), // @[Configs.scala:14:35] .io_rocc_csrs_16_sdata (_rerocc_client_io_csrs_16_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_17_ren (_core_io_rocc_csrs_17_ren), .io_rocc_csrs_17_wen (_core_io_rocc_csrs_17_wen), .io_rocc_csrs_17_wdata (_core_io_rocc_csrs_17_wdata), .io_rocc_csrs_17_value (_core_io_rocc_csrs_17_value), .io_rocc_csrs_17_stall (_rerocc_client_io_csrs_17_stall), // @[Configs.scala:14:35] .io_rocc_csrs_17_sdata (_rerocc_client_io_csrs_17_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_18_ren (_core_io_rocc_csrs_18_ren), .io_rocc_csrs_18_wen (_core_io_rocc_csrs_18_wen), .io_rocc_csrs_18_wdata (_core_io_rocc_csrs_18_wdata), .io_rocc_csrs_18_value (_core_io_rocc_csrs_18_value), .io_rocc_csrs_18_stall (_rerocc_client_io_csrs_18_stall), // @[Configs.scala:14:35] .io_rocc_csrs_18_sdata (_rerocc_client_io_csrs_18_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_19_ren (_core_io_rocc_csrs_19_ren), .io_rocc_csrs_19_wen (_core_io_rocc_csrs_19_wen), .io_rocc_csrs_19_wdata (_core_io_rocc_csrs_19_wdata), .io_rocc_csrs_19_value (_core_io_rocc_csrs_19_value), .io_rocc_csrs_19_stall (_rerocc_client_io_csrs_19_stall), // @[Configs.scala:14:35] .io_rocc_csrs_19_sdata (_rerocc_client_io_csrs_19_sdata), // @[Configs.scala:14:35] .io_rocc_csrs_20_ren (_core_io_rocc_csrs_20_ren), .io_rocc_csrs_20_wen (_core_io_rocc_csrs_20_wen), .io_rocc_csrs_20_wdata (_core_io_rocc_csrs_20_wdata), .io_rocc_csrs_20_value (_core_io_rocc_csrs_20_value), .io_rocc_csrs_20_stall (_rerocc_client_io_csrs_20_stall), // @[Configs.scala:14:35] .io_rocc_csrs_20_sdata (_rerocc_client_io_csrs_20_sdata), // @[Configs.scala:14:35] .io_trace_insns_0_valid (traceSourceNodeOut_insns_0_valid), .io_trace_insns_0_iaddr (traceSourceNodeOut_insns_0_iaddr), .io_trace_insns_0_insn (traceSourceNodeOut_insns_0_insn), .io_trace_insns_0_priv (traceSourceNodeOut_insns_0_priv), .io_trace_insns_0_exception (traceSourceNodeOut_insns_0_exception), .io_trace_insns_0_interrupt (traceSourceNodeOut_insns_0_interrupt), .io_trace_insns_0_cause (traceSourceNodeOut_insns_0_cause), .io_trace_insns_0_tval (traceSourceNodeOut_insns_0_tval), .io_trace_time (traceSourceNodeOut_time), .io_bpwatch_0_valid_0 (bpwatchSourceNodeOut_0_valid_0), .io_bpwatch_0_action (bpwatchSourceNodeOut_0_action), .io_wfi (_core_io_wfi) ); // @[RocketTile.scala:147:20] assign auto_rerocc_buffer_out_req_valid = auto_rerocc_buffer_out_req_valid_0; // @[RocketTile.scala:141:7] assign auto_rerocc_buffer_out_req_bits_opcode = auto_rerocc_buffer_out_req_bits_opcode_0; // @[RocketTile.scala:141:7] assign auto_rerocc_buffer_out_req_bits_client_id = auto_rerocc_buffer_out_req_bits_client_id_0; // @[RocketTile.scala:141:7] assign auto_rerocc_buffer_out_req_bits_manager_id 
= auto_rerocc_buffer_out_req_bits_manager_id_0; // @[RocketTile.scala:141:7] assign auto_rerocc_buffer_out_req_bits_data = auto_rerocc_buffer_out_req_bits_data_0; // @[RocketTile.scala:141:7] assign auto_rerocc_buffer_out_resp_ready = auto_rerocc_buffer_out_resp_ready_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_valid = auto_buffer_out_a_valid_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_opcode = auto_buffer_out_a_bits_opcode_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_param = auto_buffer_out_a_bits_param_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_size = auto_buffer_out_a_bits_size_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_source = auto_buffer_out_a_bits_source_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_address = auto_buffer_out_a_bits_address_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_mask = auto_buffer_out_a_bits_mask_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_data = auto_buffer_out_a_bits_data_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_b_ready = auto_buffer_out_b_ready_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_valid = auto_buffer_out_c_valid_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_bits_opcode = auto_buffer_out_c_bits_opcode_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_bits_param = auto_buffer_out_c_bits_param_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_bits_size = auto_buffer_out_c_bits_size_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_bits_source = auto_buffer_out_c_bits_source_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_bits_address = auto_buffer_out_c_bits_address_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_bits_data = auto_buffer_out_c_bits_data_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_d_ready = auto_buffer_out_d_ready_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_e_valid = auto_buffer_out_e_valid_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_e_bits_sink = auto_buffer_out_e_bits_sink_0; // @[RocketTile.scala:141:7] assign auto_wfi_out_0 = auto_wfi_out_0_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_valid = auto_trace_source_out_insns_0_valid_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_iaddr = auto_trace_source_out_insns_0_iaddr_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_insn = auto_trace_source_out_insns_0_insn_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_priv = auto_trace_source_out_insns_0_priv_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_exception = auto_trace_source_out_insns_0_exception_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_interrupt = auto_trace_source_out_insns_0_interrupt_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_cause = auto_trace_source_out_insns_0_cause_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_tval = auto_trace_source_out_insns_0_tval_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_time = auto_trace_source_out_time_0; // @[RocketTile.scala:141:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.tilelink

import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg

case class TLMonitorArgs(edge: TLEdge)

abstract class TLMonitorBase(args: TLMonitorArgs) extends Module {
  val io = IO(new Bundle {
    val in = Input(new TLBundle(args.edge.bundle))
  })

  def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
  legalize(io.in, args.edge, reset)
}

object TLMonitor {
  def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
    if (enable) {
      EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
    } else { node }
  }
}

class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) {
  require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))

  val cover_prop_class = PropertyClass.Default

  //Like assert but can flip to being an assumption for formal verification
  def monAssert(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) {
      assert(cond, message)
    } else {
      Property(monitorDir, cond, message, PropertyClass.Default)
    }

  def assume(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) {
      assert(cond, message)
    } else {
      Property(monitorDir.flip, cond, message, PropertyClass.Default)
    }

  def extra = {
    args.edge.sourceInfo match {
      case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
      case _ => ""
    }
  }

  def visible(address: UInt, source: UInt, edge: TLEdge) =
    edge.client.clients.map { c =>
      !c.sourceId.contains(source) ||
      c.visibility.map(_.contains(address)).reduce(_ || _)
    }.reduce(_ && _)

  def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
    //switch this flag to turn on diplomacy in error messages
    def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"

    monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)

    // Reuse these subexpressions to save some firrtl lines
    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)

    monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")

    //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
    //TODO: check for acquireT?
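    // -----------------------------------------------------------------------
    // Editor's note (illustrative, not part of the original Monitor.scala):
    // monAssert/assume defined above are direction-aware wrappers. With the
    // default MonitorDirection.Monitor both elaborate to plain chisel3
    // assertions, so the per-opcode checks below behave like ordinary runtime
    // assertions; in a formal flow the flipped direction turns assume() into
    // an input constraint instead. Each check below is phrased with the shared
    // subexpressions computed above, e.g. a hypothetical additional check would
    // follow the same pattern:
    //   monAssert (is_aligned, "'A' channel address not aligned to size" + extra)
    // -----------------------------------------------------------------------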
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
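// Per-source bookkeeping (descriptive note): 'inflight' keeps one valid bit per source ID, while the two
// registers updated below keep the A-channel opcode and size in that source's slot. Each opcode/size slot
// stores (value << 1) | 1, so an all-zeros slot unambiguously means "no request outstanding"; as an
// illustrative example, an outstanding Get (opcode 4 in TLMessages) of lgSize 3 occupies its slots as
// ((4 << 1) | 1) = 9 and ((3 << 1) | 1) = 7. The a_opcode_lookup/a_size_lookup wires above shift the slot
// back down and drop the low marker bit before the D-channel response is checked against it.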
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
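// Log-depth barrel rotate (descriptive note): the amount is padded up to log2Ceil(width) bits, and each
// fold stage i applies a fixed rotate by 2^i only when bit i of the amount is set, so an arbitrary
// dynamic rotate costs one mux layer per amount bit rather than a full crossbar.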
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, 
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
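// (Descriptive note: these manager/client support flags are elaboration-time Scala Booleans, which is
// what lets staticHasData return Some(true)/Some(false) here and lets hasData below fold to a constant.)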
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
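The numBeats1 and firstlastHelper methods above are what drive the a_first / d_first beat counters visible in the generated monitor that follows. Below is a minimal plain-Scala sketch of that beat arithmetic; beatBytes = 8 is an assumed bus width, and the helpers only mirror the shape of UIntToOH1 and numBeats1 rather than being the library code itself:

object BeatCountSketch extends App {
  val beatBytes = 8                                      // assumed 64-bit data bus
  def log2Ceil(x: Int): Int = 32 - Integer.numberOfLeadingZeros(x - 1)
  def uintToOH1(lgSize: Int): Int = (1 << lgSize) - 1    // same shape as UIntToOH1
  // beats1 = (number of beats) - 1, and zero for messages that carry no data
  def numBeats1(lgSize: Int, hasData: Boolean): Int =
    if (hasData) uintToOH1(lgSize) >> log2Ceil(beatBytes) else 0

  assert(numBeats1(6, hasData = true)  == 7)  // 64-byte PutFullData: 8 beats
  assert(numBeats1(6, hasData = false) == 0)  // Get carries no data on channel A
}

In firstlastHelper the counter is loaded with beats1 on the first beat and counts down, so first is simply counter === 0.U and last fires when the counter reaches 1, or immediately for single-beat messages.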
module TLMonitor_49( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [11:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [25:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [11:0] io_in_d_bits_source // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire a_first_done = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [1:0] size; // @[Monitor.scala:389:22] reg [11:0] source; // @[Monitor.scala:390:22] reg [25:0] address; // @[Monitor.scala:391:22] reg d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] size_1; // @[Monitor.scala:540:22] reg [11:0] source_1; // @[Monitor.scala:541:22] reg [2063:0] inflight; // @[Monitor.scala:614:27] reg [8255:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [8255:0] inflight_sizes; // @[Monitor.scala:618:33] reg a_first_counter_1; // @[Edges.scala:229:27] reg d_first_counter_1; // @[Edges.scala:229:27] wire [4095:0] _GEN = {4084'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_0 = a_first_done & ~a_first_counter_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_1 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [4095:0] _GEN_2 = {4084'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [2063:0] inflight_1; // @[Monitor.scala:726:35] reg [8255:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg d_first_counter_2; // @[Edges.scala:229:27] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files.
File DescribedSRAM.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.

package freechips.rocketchip.util

import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil

object DescribedSRAM {
  def apply[T <: Data](
    name: String,
    desc: String,
    size: BigInt, // depth
    data: T
  ): SyncReadMem[T] = {
    val mem = SyncReadMem(size, data)
    mem.suggestName(name)
    val granWidth = data match {
      case v: Vec[_] => v.head.getWidth
      case d => d.getWidth
    }
    val uid = 0
    Annotated.srams(
      component = mem,
      name = name,
      address_width = log2Ceil(size),
      data_width = data.getWidth,
      depth = size,
      description = desc,
      write_mask_granularity = granWidth
    )
    mem
  }
}
module rockettile_icache_tag_array_0( // @[DescribedSRAM.scala:17:26]
  input  [5:0]  RW0_addr,
  input         RW0_en,
  input         RW0_clk,
  input         RW0_wmode,
  input  [20:0] RW0_wdata,
  output [20:0] RW0_rdata
);

  rockettile_icache_tag_array_0_ext rockettile_icache_tag_array_0_ext ( // @[DescribedSRAM.scala:17:26]
    .RW0_addr  (RW0_addr),
    .RW0_en    (RW0_en),
    .RW0_clk   (RW0_clk),
    .RW0_wmode (RW0_wmode),
    .RW0_wdata (RW0_wdata),
    .RW0_rdata (RW0_rdata)
  ); // @[DescribedSRAM.scala:17:26]
endmodule
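For reference, a minimal sketch of how a tag array like the one above could be declared through DescribedSRAM. The 64-entry depth and 21-bit tag width are read off the generated port widths; the surrounding module, port names, and description string are made up for illustration:

import chisel3._
import freechips.rocketchip.util.DescribedSRAM

class TagArrayExample extends Module {
  val io = IO(new Bundle {
    val addr  = Input(UInt(6.W))    // 64 entries
    val wen   = Input(Bool())
    val wdata = Input(UInt(21.W))   // 21-bit tag
    val rdata = Output(UInt(21.W))
  })
  val tag_array = DescribedSRAM(
    name = "tag_array",
    desc = "ICache Tag Array",      // free-form description for the annotation
    size = 64,
    data = UInt(21.W)
  )
  // Read when not writing; SyncReadMem gives one-cycle read latency.
  io.rdata := tag_array.read(io.addr, !io.wen)
  when (io.wen) { tag_array.write(io.addr, io.wdata) }
}

Because the memory is only ever read or written through a single SyncReadMem port per cycle, it can be lowered to the single RW0_* read/write port of the extracted *_ext macro shown above.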
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag }
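One of the smaller helpers defined above, readAndHold, is a good example of how these implicit classes are meant to be used. A minimal usage sketch follows; the module and port names are made up for illustration:

import chisel3._
import freechips.rocketchip.util._

class ReadAndHoldExample extends Module {
  val io = IO(new Bundle {
    val addr = Input(UInt(4.W))
    val en   = Input(Bool())
    val data = Output(UInt(8.W))
  })
  val mem = SyncReadMem(16, UInt(8.W))
  // Expands to: mem.read(io.addr, io.en) holdUnless RegNext(io.en),
  // so the last valid read value is held while the enable is low.
  io.data := mem.readAndHold(io.addr, io.en)
}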
module OptimizationBarrier_EntryData_4( // @[package.scala:267:30] input clock, // @[package.scala:267:30] input reset, // @[package.scala:267:30] input [19:0] io_x_ppn, // @[package.scala:268:18] input io_x_u, // @[package.scala:268:18] input io_x_g, // @[package.scala:268:18] input io_x_ae, // @[package.scala:268:18] input io_x_sw, // @[package.scala:268:18] input io_x_sx, // @[package.scala:268:18] input io_x_sr, // @[package.scala:268:18] input io_x_pw, // @[package.scala:268:18] input io_x_px, // @[package.scala:268:18] input io_x_pr, // @[package.scala:268:18] input io_x_pal, // @[package.scala:268:18] input io_x_paa, // @[package.scala:268:18] input io_x_eff, // @[package.scala:268:18] input io_x_c, // @[package.scala:268:18] input io_x_fragmented_superpage // @[package.scala:268:18] ); wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30] wire io_x_u_0 = io_x_u; // @[package.scala:267:30] wire io_x_g_0 = io_x_g; // @[package.scala:267:30] wire io_x_ae_0 = io_x_ae; // @[package.scala:267:30] wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30] wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30] wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30] wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30] wire io_x_px_0 = io_x_px; // @[package.scala:267:30] wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30] wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30] wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30] wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30] wire io_x_c_0 = io_x_c; // @[package.scala:267:30] wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30] wire [19:0] io_y_ppn = io_x_ppn_0; // @[package.scala:267:30] wire io_y_u = io_x_u_0; // @[package.scala:267:30] wire io_y_g = io_x_g_0; // @[package.scala:267:30] wire io_y_ae = io_x_ae_0; // @[package.scala:267:30] wire io_y_sw = io_x_sw_0; // @[package.scala:267:30] wire io_y_sx = io_x_sx_0; // @[package.scala:267:30] wire io_y_sr = io_x_sr_0; // @[package.scala:267:30] wire io_y_pw = io_x_pw_0; // @[package.scala:267:30] wire io_y_px = io_x_px_0; // @[package.scala:267:30] wire io_y_pr = io_x_pr_0; // @[package.scala:267:30] wire io_y_pal = io_x_pal_0; // @[package.scala:267:30] wire io_y_paa = io_x_paa_0; // @[package.scala:267:30] wire io_y_eff = io_x_eff_0; // @[package.scala:267:30] wire io_y_c = io_x_c_0; // @[package.scala:267:30] wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30] endmodule
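The module above is one generated instance of the OptimizationBarrier helper defined in package.scala: a pass-through wrapper that gives a value its own module boundary so the compiler does not optimize across that point. A minimal sketch of how it is applied; EntryDataLike below is a made-up stand-in carrying a few of the fields visible in the port list above:

import chisel3._
import freechips.rocketchip.util.OptimizationBarrier

class EntryDataLike extends Bundle {  // hypothetical stand-in bundle
  val ppn = UInt(20.W)
  val u   = Bool()
  val g   = Bool()
  val ae  = Bool()
}

class BarrierExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(new EntryDataLike)
    val out = Output(new EntryDataLike)
  })
  // OptimizationBarrier(x) instantiates the wrapper module, drives its io.x,
  // and returns io.y, which simply forwards the input.
  io.out := OptimizationBarrier(io.in)
}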
Generate the Verilog code corresponding to the following Chisel files. File primitives.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object lowMask { def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt = { require(topBound != bottomBound) val numInVals = BigInt(1)<<in.getWidth if (topBound < bottomBound) { lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound) } else if (numInVals > 64 /* Empirical */) { // For simulation performance, we should avoid generating // exteremely wide shifters, so we divide and conquer. // Empirically, this does not impact synthesis QoR. 
val mid = numInVals / 2 val msb = in(in.getWidth - 1) val lsbs = in(in.getWidth - 2, 0) if (mid < topBound) { if (mid <= bottomBound) { Mux(msb, lowMask(lsbs, topBound - mid, bottomBound - mid), 0.U ) } else { Mux(msb, lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U, lowMask(lsbs, mid, bottomBound) ) } } else { ~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound)) } } else { val shift = (BigInt(-1)<<numInVals.toInt).S>>in Reverse( shift( (numInVals - 1 - bottomBound).toInt, (numInVals - topBound).toInt ) ) } } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object countLeadingZeros { def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy2 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 1)>>1 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 2).orR reducedVec.asUInt } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy4 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 3)>>2 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 4).orR reducedVec.asUInt } } File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
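A minimal instantiation sketch for the RoundRawFNToRecFN wrapper just listed, assuming single precision (expWidth = 8, sigWidth = 24), round-to-nearest-even, and tininess detected after rounding; the surrounding module is illustrative only:

import chisel3._
import hardfloat._
import hardfloat.consts._

class RoundExample extends Module {
  val io = IO(new Bundle {
    val in    = Input(new RawFloat(8, 26)) // sigWidth + 2, as the wrapper expects
    val out   = Output(UInt(33.W))         // recoded single precision: 8 + 24 + 1 bits
    val flags = Output(UInt(5.W))
  })
  val rounder = Module(new RoundRawFNToRecFN(expWidth = 8, sigWidth = 24, options = 0))
  rounder.io.invalidExc     := false.B
  rounder.io.infiniteExc    := false.B
  rounder.io.in             := io.in
  rounder.io.roundingMode   := round_near_even
  rounder.io.detectTininess := tininess_afterRounding
  io.out   := rounder.io.out
  io.flags := rounder.io.exceptionFlags
}

The generated module that follows is the inner RoundAnyRawFNToRecFN stage this wrapper instantiates, with inSigWidth = 26 and outSigWidth = 24.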
module RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_25( // @[RoundAnyRawFNToRecFN.scala:48:5] input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16] input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16] input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16] output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16] output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16] ); wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19] wire [15:0] _roundMask_T_5 = 16'hFF; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_4 = 16'hFF00; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_10 = 16'hFF00; // @[primitives.scala:77:20] wire [11:0] _roundMask_T_13 = 12'hFF; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_14 = 16'hFF0; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_15 = 16'hF0F; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_20 = 16'hF0F0; // @[primitives.scala:77:20] wire [13:0] _roundMask_T_23 = 14'hF0F; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_24 = 16'h3C3C; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_25 = 16'h3333; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_30 = 16'hCCCC; // @[primitives.scala:77:20] wire [14:0] _roundMask_T_33 = 15'h3333; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_34 = 16'h6666; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_35 = 16'h5555; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_40 = 16'hAAAA; // @[primitives.scala:77:20] wire [25:0] _roundedSig_T_15 = 26'h0; // @[RoundAnyRawFNToRecFN.scala:181:24] wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14] wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14] wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:257:18] wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:261:18] wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:269:16] wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:273:16] wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:284:13] wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5] wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:90:53] wire _roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:169:38] wire _unboundedRange_roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:207:38] wire _common_underflow_T_7 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:222:49] wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:32] wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:60] wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire roundingMode_minMag = 1'h0; // 
@[RoundAnyRawFNToRecFN.scala:91:53] wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53] wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53] wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53] wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53] wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27] wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63] wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42] wire _roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:171:29] wire _roundedSig_T_13 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:181:42] wire _unboundedRange_roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:209:29] wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60] wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45] wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42] wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39] wire notNaN_isSpecialInfOut = io_in_isInf_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :236:49] wire [26:0] adjustedSig = io_in_sig_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :114:22] wire [32:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33] wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66] wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66] wire doShiftSigDown1 = adjustedSig[26]; // @[RoundAnyRawFNToRecFN.scala:114:22, :120:57] wire [8:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37] wire [8:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31] wire [22:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16] wire [22:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31] wire _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:196:50] wire common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37] wire _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:200:31] wire common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37] wire _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:217:40] wire common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37] wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49] wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37] wire [8:0] _roundMask_T = io_in_sExp_0[8:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :156:37] wire [8:0] _roundMask_T_1 = ~_roundMask_T; // @[primitives.scala:52:21] wire roundMask_msb = _roundMask_T_1[8]; // @[primitives.scala:52:21, :58:25] wire [7:0] roundMask_lsbs = _roundMask_T_1[7:0]; // @[primitives.scala:52:21, :59:26] wire roundMask_msb_1 = roundMask_lsbs[7]; // @[primitives.scala:58:25, :59:26] wire [6:0] roundMask_lsbs_1 = roundMask_lsbs[6:0]; // @[primitives.scala:59:26] wire roundMask_msb_2 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26] wire roundMask_msb_3 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26] wire [5:0] roundMask_lsbs_2 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26] wire [5:0] roundMask_lsbs_3 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26] wire [64:0] roundMask_shift = $signed(65'sh10000000000000000 >>> roundMask_lsbs_2); // @[primitives.scala:59:26, :76:56] wire [21:0] _roundMask_T_2 = roundMask_shift[63:42]; // @[primitives.scala:76:56, :78:22] wire [15:0] _roundMask_T_3 = _roundMask_T_2[15:0]; // 
@[primitives.scala:77:20, :78:22] wire [7:0] _roundMask_T_6 = _roundMask_T_3[15:8]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_7 = {8'h0, _roundMask_T_6}; // @[primitives.scala:77:20] wire [7:0] _roundMask_T_8 = _roundMask_T_3[7:0]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_9 = {_roundMask_T_8, 8'h0}; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_11 = _roundMask_T_9 & 16'hFF00; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_12 = _roundMask_T_7 | _roundMask_T_11; // @[primitives.scala:77:20] wire [11:0] _roundMask_T_16 = _roundMask_T_12[15:4]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_17 = {4'h0, _roundMask_T_16 & 12'hF0F}; // @[primitives.scala:77:20] wire [11:0] _roundMask_T_18 = _roundMask_T_12[11:0]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_19 = {_roundMask_T_18, 4'h0}; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_21 = _roundMask_T_19 & 16'hF0F0; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_22 = _roundMask_T_17 | _roundMask_T_21; // @[primitives.scala:77:20] wire [13:0] _roundMask_T_26 = _roundMask_T_22[15:2]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_27 = {2'h0, _roundMask_T_26 & 14'h3333}; // @[primitives.scala:77:20] wire [13:0] _roundMask_T_28 = _roundMask_T_22[13:0]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_29 = {_roundMask_T_28, 2'h0}; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_31 = _roundMask_T_29 & 16'hCCCC; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_32 = _roundMask_T_27 | _roundMask_T_31; // @[primitives.scala:77:20] wire [14:0] _roundMask_T_36 = _roundMask_T_32[15:1]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_37 = {1'h0, _roundMask_T_36 & 15'h5555}; // @[primitives.scala:77:20] wire [14:0] _roundMask_T_38 = _roundMask_T_32[14:0]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_39 = {_roundMask_T_38, 1'h0}; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_41 = _roundMask_T_39 & 16'hAAAA; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_42 = _roundMask_T_37 | _roundMask_T_41; // @[primitives.scala:77:20] wire [5:0] _roundMask_T_43 = _roundMask_T_2[21:16]; // @[primitives.scala:77:20, :78:22] wire [3:0] _roundMask_T_44 = _roundMask_T_43[3:0]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_45 = _roundMask_T_44[1:0]; // @[primitives.scala:77:20] wire _roundMask_T_46 = _roundMask_T_45[0]; // @[primitives.scala:77:20] wire _roundMask_T_47 = _roundMask_T_45[1]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_48 = {_roundMask_T_46, _roundMask_T_47}; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_49 = _roundMask_T_44[3:2]; // @[primitives.scala:77:20] wire _roundMask_T_50 = _roundMask_T_49[0]; // @[primitives.scala:77:20] wire _roundMask_T_51 = _roundMask_T_49[1]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_52 = {_roundMask_T_50, _roundMask_T_51}; // @[primitives.scala:77:20] wire [3:0] _roundMask_T_53 = {_roundMask_T_48, _roundMask_T_52}; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_54 = _roundMask_T_43[5:4]; // @[primitives.scala:77:20] wire _roundMask_T_55 = _roundMask_T_54[0]; // @[primitives.scala:77:20] wire _roundMask_T_56 = _roundMask_T_54[1]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_57 = {_roundMask_T_55, _roundMask_T_56}; // @[primitives.scala:77:20] wire [5:0] _roundMask_T_58 = {_roundMask_T_53, _roundMask_T_57}; // @[primitives.scala:77:20] wire [21:0] _roundMask_T_59 = {_roundMask_T_42, _roundMask_T_58}; // @[primitives.scala:77:20] wire [21:0] _roundMask_T_60 = 
~_roundMask_T_59; // @[primitives.scala:73:32, :77:20] wire [21:0] _roundMask_T_61 = roundMask_msb_2 ? 22'h0 : _roundMask_T_60; // @[primitives.scala:58:25, :73:{21,32}] wire [21:0] _roundMask_T_62 = ~_roundMask_T_61; // @[primitives.scala:73:{17,21}] wire [24:0] _roundMask_T_63 = {_roundMask_T_62, 3'h7}; // @[primitives.scala:68:58, :73:17] wire [64:0] roundMask_shift_1 = $signed(65'sh10000000000000000 >>> roundMask_lsbs_3); // @[primitives.scala:59:26, :76:56] wire [2:0] _roundMask_T_64 = roundMask_shift_1[2:0]; // @[primitives.scala:76:56, :78:22] wire [1:0] _roundMask_T_65 = _roundMask_T_64[1:0]; // @[primitives.scala:77:20, :78:22] wire _roundMask_T_66 = _roundMask_T_65[0]; // @[primitives.scala:77:20] wire _roundMask_T_67 = _roundMask_T_65[1]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_68 = {_roundMask_T_66, _roundMask_T_67}; // @[primitives.scala:77:20] wire _roundMask_T_69 = _roundMask_T_64[2]; // @[primitives.scala:77:20, :78:22] wire [2:0] _roundMask_T_70 = {_roundMask_T_68, _roundMask_T_69}; // @[primitives.scala:77:20] wire [2:0] _roundMask_T_71 = roundMask_msb_3 ? _roundMask_T_70 : 3'h0; // @[primitives.scala:58:25, :62:24, :77:20] wire [24:0] _roundMask_T_72 = roundMask_msb_1 ? _roundMask_T_63 : {22'h0, _roundMask_T_71}; // @[primitives.scala:58:25, :62:24, :67:24, :68:58] wire [24:0] _roundMask_T_73 = roundMask_msb ? _roundMask_T_72 : 25'h0; // @[primitives.scala:58:25, :62:24, :67:24] wire [24:0] _roundMask_T_74 = {_roundMask_T_73[24:1], _roundMask_T_73[0] | doShiftSigDown1}; // @[primitives.scala:62:24] wire [26:0] roundMask = {_roundMask_T_74, 2'h3}; // @[RoundAnyRawFNToRecFN.scala:159:{23,42}] wire [27:0] _shiftedRoundMask_T = {1'h0, roundMask}; // @[RoundAnyRawFNToRecFN.scala:159:42, :162:41] wire [26:0] shiftedRoundMask = _shiftedRoundMask_T[27:1]; // @[RoundAnyRawFNToRecFN.scala:162:{41,53}] wire [26:0] _roundPosMask_T = ~shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:162:53, :163:28] wire [26:0] roundPosMask = _roundPosMask_T & roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :163:{28,46}] wire [26:0] _roundPosBit_T = adjustedSig & roundPosMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :163:46, :164:40] wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}] wire _roundIncr_T_1 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:67] wire _roundedSig_T_3 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :175:49] wire [26:0] _anyRoundExtra_T = adjustedSig & shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :162:53, :165:42] wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}] wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36] wire roundIncr = _roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31] wire [26:0] _roundedSig_T = adjustedSig | roundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :159:42, :174:32] wire [24:0] _roundedSig_T_1 = _roundedSig_T[26:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}] wire [25:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 26'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}] wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30] wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30] wire [25:0] _roundedSig_T_6 = roundMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:159:42, :177:35] wire [25:0] _roundedSig_T_7 = _roundedSig_T_5 ? 
_roundedSig_T_6 : 26'h0; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}, :177:35] wire [25:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}] wire [25:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21] wire [26:0] _roundedSig_T_10 = ~roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :180:32] wire [26:0] _roundedSig_T_11 = adjustedSig & _roundedSig_T_10; // @[RoundAnyRawFNToRecFN.scala:114:22, :180:{30,32}] wire [24:0] _roundedSig_T_12 = _roundedSig_T_11[26:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}] wire [25:0] _roundedSig_T_14 = roundPosMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:163:46, :181:67] wire [25:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12}; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}] wire [25:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47] wire [1:0] _sRoundedExp_T = roundedSig[25:24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54] wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}] wire [10:0] sRoundedExp = {io_in_sExp_0[9], io_in_sExp_0} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:48:5, :185:{40,76}] assign _common_expOut_T = sRoundedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37] assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37] wire [22:0] _common_fractOut_T = roundedSig[23:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27] wire [22:0] _common_fractOut_T_1 = roundedSig[22:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27] assign _common_fractOut_T_2 = doShiftSigDown1 ? _common_fractOut_T : _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :189:16, :190:27, :191:27] assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16] wire [3:0] _common_overflow_T = sRoundedExp[10:7]; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:30] assign _common_overflow_T_1 = $signed(_common_overflow_T) > 4'sh2; // @[RoundAnyRawFNToRecFN.scala:196:{30,50}] assign common_overflow = _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:124:37, :196:50] assign _common_totalUnderflow_T = $signed(sRoundedExp) < 11'sh6B; // @[RoundAnyRawFNToRecFN.scala:185:40, :200:31] assign common_totalUnderflow = _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:125:37, :200:31] wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45] wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45, :205:44] wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:61] wire unboundedRange_roundPosBit = doShiftSigDown1 ? 
_unboundedRange_roundPosBit_T : _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :203:{16,45,61}] wire _unboundedRange_roundIncr_T_1 = unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:67] wire _unboundedRange_anyRound_T_1 = doShiftSigDown1 & _unboundedRange_anyRound_T; // @[RoundAnyRawFNToRecFN.scala:120:57, :205:{30,44}] wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:114:22, :205:63] wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}] wire unboundedRange_anyRound = _unboundedRange_anyRound_T_1 | _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{30,49,70}] wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46] wire _roundCarry_T = roundedSig[25]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27] wire _roundCarry_T_1 = roundedSig[24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27] wire roundCarry = doShiftSigDown1 ? _roundCarry_T : _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :211:16, :212:27, :213:27] wire [1:0] _common_underflow_T = io_in_sExp_0[9:8]; // @[RoundAnyRawFNToRecFN.scala:48:5, :220:49] wire _common_underflow_T_1 = _common_underflow_T != 2'h1; // @[RoundAnyRawFNToRecFN.scala:220:{49,64}] wire _common_underflow_T_2 = anyRound & _common_underflow_T_1; // @[RoundAnyRawFNToRecFN.scala:166:36, :220:{32,64}] wire _common_underflow_T_3 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57] wire _common_underflow_T_9 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57, :225:49] wire _common_underflow_T_4 = roundMask[2]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:71] wire _common_underflow_T_5 = doShiftSigDown1 ? _common_underflow_T_3 : _common_underflow_T_4; // @[RoundAnyRawFNToRecFN.scala:120:57, :221:{30,57,71}] wire _common_underflow_T_6 = _common_underflow_T_2 & _common_underflow_T_5; // @[RoundAnyRawFNToRecFN.scala:220:{32,72}, :221:30] wire _common_underflow_T_8 = roundMask[4]; // @[RoundAnyRawFNToRecFN.scala:159:42, :224:49] wire _common_underflow_T_10 = doShiftSigDown1 ? 
_common_underflow_T_8 : _common_underflow_T_9; // @[RoundAnyRawFNToRecFN.scala:120:57, :223:39, :224:49, :225:49] wire _common_underflow_T_11 = ~_common_underflow_T_10; // @[RoundAnyRawFNToRecFN.scala:223:{34,39}] wire _common_underflow_T_12 = _common_underflow_T_11; // @[RoundAnyRawFNToRecFN.scala:222:77, :223:34] wire _common_underflow_T_13 = _common_underflow_T_12 & roundCarry; // @[RoundAnyRawFNToRecFN.scala:211:16, :222:77, :226:38] wire _common_underflow_T_14 = _common_underflow_T_13 & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :226:38, :227:45] wire _common_underflow_T_15 = _common_underflow_T_14 & unboundedRange_roundIncr; // @[RoundAnyRawFNToRecFN.scala:208:46, :227:{45,60}] wire _common_underflow_T_16 = ~_common_underflow_T_15; // @[RoundAnyRawFNToRecFN.scala:222:27, :227:60] wire _common_underflow_T_17 = _common_underflow_T_6 & _common_underflow_T_16; // @[RoundAnyRawFNToRecFN.scala:220:72, :221:76, :222:27] assign _common_underflow_T_18 = common_totalUnderflow | _common_underflow_T_17; // @[RoundAnyRawFNToRecFN.scala:125:37, :217:40, :221:76] assign common_underflow = _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:126:37, :217:40] assign _common_inexact_T = common_totalUnderflow | anyRound; // @[RoundAnyRawFNToRecFN.scala:125:37, :166:36, :230:49] assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49] wire isNaNOut = io_invalidExc_0 | io_in_isNaN_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34] wire _commonCase_T = ~isNaNOut; // @[RoundAnyRawFNToRecFN.scala:235:34, :237:22] wire _commonCase_T_1 = ~notNaN_isSpecialInfOut; // @[RoundAnyRawFNToRecFN.scala:236:49, :237:36] wire _commonCase_T_2 = _commonCase_T & _commonCase_T_1; // @[RoundAnyRawFNToRecFN.scala:237:{22,33,36}] wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64] wire commonCase = _commonCase_T_2 & _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{33,61,64}] wire overflow = commonCase & common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37, :237:61, :238:32] wire _notNaN_isInfOut_T = overflow; // @[RoundAnyRawFNToRecFN.scala:238:32, :248:45] wire underflow = commonCase & common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37, :237:61, :239:32] wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43] wire inexact = overflow | _inexact_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :240:{28,43}] wire _pegMinNonzeroMagOut_T = commonCase & common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :237:61, :245:20] wire notNaN_isInfOut = notNaN_isSpecialInfOut | _notNaN_isInfOut_T; // @[RoundAnyRawFNToRecFN.scala:236:49, :248:{32,45}] wire signOut = ~isNaNOut & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :250:22] wire _expOut_T = io_in_isZero_0 | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:48:5, :125:37, :253:32] wire [8:0] _expOut_T_1 = _expOut_T ? 
                     9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}]
  wire [8:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}]
  wire [8:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14]
  wire [8:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17]
  wire [8:0] _expOut_T_10 = _expOut_T_7; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17]
  wire [8:0] _expOut_T_11 = {2'h0, notNaN_isInfOut, 6'h0}; // @[RoundAnyRawFNToRecFN.scala:248:32, :265:18]
  wire [8:0] _expOut_T_12 = ~_expOut_T_11; // @[RoundAnyRawFNToRecFN.scala:265:{14,18}]
  wire [8:0] _expOut_T_13 = _expOut_T_10 & _expOut_T_12; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17, :265:14]
  wire [8:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18]
  wire [8:0] _expOut_T_17 = _expOut_T_15; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15]
  wire [8:0] _expOut_T_18 = notNaN_isInfOut ? 9'h180 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:248:32, :277:16]
  wire [8:0] _expOut_T_19 = _expOut_T_17 | _expOut_T_18; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15, :277:16]
  wire [8:0] _expOut_T_20 = isNaNOut ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:235:34, :278:16]
  wire [8:0] expOut = _expOut_T_19 | _expOut_T_20; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73, :278:16]
  wire _fractOut_T = isNaNOut | io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :280:22]
  wire _fractOut_T_1 = _fractOut_T | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :280:{22,38}]
  wire [22:0] _fractOut_T_2 = {isNaNOut, 22'h0}; // @[RoundAnyRawFNToRecFN.scala:235:34, :281:16]
  wire [22:0] _fractOut_T_3 = _fractOut_T_1 ? _fractOut_T_2 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16]
  wire [22:0] fractOut = _fractOut_T_3; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11]
  wire [9:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23]
  assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}]
  assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33]
  wire [1:0] _io_exceptionFlags_T = {io_invalidExc_0, 1'h0}; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:23]
  wire [2:0] _io_exceptionFlags_T_1 = {_io_exceptionFlags_T, overflow}; // @[RoundAnyRawFNToRecFN.scala:238:32, :288:{23,41}]
  wire [3:0] _io_exceptionFlags_T_2 = {_io_exceptionFlags_T_1, underflow}; // @[RoundAnyRawFNToRecFN.scala:239:32, :288:{41,53}]
  assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}]
  assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66]
  assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
  assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
endmodule
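Note: the module tail above is the final exponent/fraction packing stage of a recoded-float rounding unit. As a point of reference only, the sketch below shows how such a rounder is typically instantiated from Chisel, assuming the Berkeley hardfloat RoundRawFNToRecFN interface for float32 (expWidth = 8, sigWidth = 24); the wrapper class and its port names are illustrative and are not part of the generated output above.

// Illustrative sketch only -- assumes the hardfloat RoundRawFNToRecFN interface.
import chisel3._
import hardfloat._

class RoundF32Sketch extends Module {
  val io = IO(new Bundle {
    val invalidExc     = Input(Bool())
    val in             = Input(new RawFloat(8, 26))   // sign, sExp (10 bits), sig (27 bits)
    val out            = Output(UInt(33.W))           // 33-bit recoded float32
    val exceptionFlags = Output(UInt(5.W))
  })
  val rounder = Module(new RoundRawFNToRecFN(8, 24, 0))
  rounder.io.invalidExc     := io.invalidExc
  rounder.io.infiniteExc    := false.B
  rounder.io.in             := io.in
  rounder.io.roundingMode   := consts.round_near_even        // fixed rounding mode, as constant-folded above
  rounder.io.detectTininess := consts.tininess_afterRounding
  io.out            := rounder.io.out                        // corresponds to the {signOut, expOut, fractOut} packing
  io.exceptionFlags := rounder.io.exceptionFlags             // {invalid, infinite, overflow, underflow, inexact}
}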
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.tilelink

import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine

import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._

import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg

case class TLMonitorArgs(edge: TLEdge)

abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
  val io = IO(new Bundle {
    val in = Input(new TLBundle(args.edge.bundle))
  })

  def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
  legalize(io.in, args.edge, reset)
}

object TLMonitor {
  def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
    if (enable) {
      EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
    } else { node }
  }
}

class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
  require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))

  val cover_prop_class = PropertyClass.Default

  //Like assert but can flip to being an assumption for formal verification
  def monAssert(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) {
      assert(cond, message)
    } else {
      Property(monitorDir, cond, message, PropertyClass.Default)
    }

  def assume(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) {
      assert(cond, message)
    } else {
      Property(monitorDir.flip, cond, message, PropertyClass.Default)
    }

  def extra = {
    args.edge.sourceInfo match {
      case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
      case _ => ""
    }
  }

  def visible(address: UInt, source: UInt, edge: TLEdge) =
    edge.client.clients.map { c =>
      !c.sourceId.contains(source) ||
      c.visibility.map(_.contains(address)).reduce(_ || _)
    }.reduce(_ && _)

  def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
    //switch this flag to turn on diplomacy in error messages
    def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"

    monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)

    // Reuse these subexpressions to save some firrtl lines
    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)

    monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")

    //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
    //TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
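// [Editor's note: illustrative usage sketch for DecoupledHelper above; `enq` and `deq` are
// hypothetical Decoupled ports, not part of this file.] Each side fires only when all the
// other conditions hold, with its own handshake term excluded by referential equality:
//   val xact = DecoupledHelper(enq.valid, deq.ready)
//   deq.valid := xact.fire(deq.ready)   // everything except deq.ready, i.e. enq.valid
//   enq.ready := xact.fire(enq.valid)   // everything except enq.valid, i.e. deq.ready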
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
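// [Editor's note: descriptive comment, not in the original file.] Like rotate(n: UInt) above,
// this builds a barrel rotator: fold stage i rotates by 2^i positions only when bit i of the
// zero-padded amount is set, so log2(size) mux stages cover every dynamic rotation amount.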
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
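// [Editor's note: illustrative usage sketch for the UIntToAugmentedUInt helpers above; the
// value and widths are made up for the example.]
//   val a = 0x5.U(3.W)        // 0b101
//   a.sextTo(8)               // sign-extend to 8 bits  -> 0xf5
//   a.padTo(8)                // zero-extend to 8 bits  -> 0x05
//   a.extract(2, 3)           // zero-width extract     -> 0.U (instead of an error)
//   a.rotateRight(1)          // static rotate of 0b101 -> 0b110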
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
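// [Editor's note: illustrative sketch, not part of this file.] groupByIntoSeq above preserves
// first-encounter order of both keys and values, unlike Seq.groupBy, which keeps generated
// hardware deterministic across runs:
//   groupByIntoSeq(Seq("ab", "ba", "ac"))(_.head)
//     // => Seq(('a', Seq("ab", "ac")), ('b', Seq("ba")))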
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
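// [Editor's note: illustrative sketch, not part of Bundles.scala; `a` and `c` stand for
// hypothetical TLBundleA / TLBundleC wires.] The param field is interpreted per opcode using
// the encodings defined above:
//   when (a.opcode === TLMessages.AcquireBlock) {
//     assert(TLPermissions.isGrow(a.param))                      // NtoB / NtoT / BtoT
//   }
//   when (c.opcode === TLMessages.ReleaseData) {
//     assert(TLPermissions.isShrink(c.param) || TLPermissions.isReport(c.param))
//   }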
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
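// [Editor's note: illustrative sketch, not part of Bundles.scala; `params` is a hypothetical
// TLBundleParameters.] A TLBundle only lists b/c/e in `elements` when params.hasBCE; the
// accessors otherwise return zero-initialized dummy wires, so the same connection code
// elaborates for both TL-UL and TL-C links:
//   val out = Wire(new TLBundle(params))
//   out.c.valid := false.B    // drives a harmless dummy wire when hasBCE is false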
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.diplomacy.{ AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry, IdRange, RegionType, TransferSizes } import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions} import freechips.rocketchip.util.{ AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase, CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct } import scala.math.max //These transfer sizes describe requests issued from masters on the A channel that will be responded by slaves on the D channel case class TLMasterToSlaveTransferSizes( // Supports both Acquire+Release of the following two sizes: acquireT: TransferSizes = TransferSizes.none, acquireB: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none) extends TLCommonTransferSizes { def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .intersect(rhs.acquireT), acquireB = acquireB .intersect(rhs.acquireB), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint)) def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .mincover(rhs.acquireT), acquireB = acquireB .mincover(rhs.acquireB), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint)) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(acquireT, "T"), str(acquireB, "B"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""acquireT = ${acquireT} |acquireB = ${acquireB} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLMasterToSlaveTransferSizes { def unknownEmits = TLMasterToSlaveTransferSizes( acquireT = TransferSizes(1, 
4096), acquireB = TransferSizes(1, 4096), arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096)) def unknownSupports = TLMasterToSlaveTransferSizes() } //These transfer sizes describe requests issued from slaves on the B channel that will be responded by masters on the C channel case class TLSlaveToMasterTransferSizes( probe: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none ) extends TLCommonTransferSizes { def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .intersect(rhs.probe), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint) ) def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .mincover(rhs.probe), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint) ) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(probe, "P"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""probe = ${probe} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLSlaveToMasterTransferSizes { def unknownEmits = TLSlaveToMasterTransferSizes( arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096), probe = TransferSizes(1, 4096)) def unknownSupports = TLSlaveToMasterTransferSizes() } trait TLCommonTransferSizes { def arithmetic: TransferSizes def logical: TransferSizes def get: TransferSizes def putFull: TransferSizes def putPartial: TransferSizes def hint: TransferSizes } class TLSlaveParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], setName: Option[String], val address: Seq[AddressSet], val regionType: RegionType.T, val executable: Boolean, val fifoId: Option[Int], val supports: TLMasterToSlaveTransferSizes, val emits: TLSlaveToMasterTransferSizes, // By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation) val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData // If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order // Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck // ReleaseAck may NEVER be denied 
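// [Editor's note: illustrative sketch, not part of Parameters.scala.] For the transfer-size
// bundles above, intersect keeps only what both sides can do, while mincover is the smallest
// range covering either side; the port parameters below use the former for "supported by all"
// claims and the latter for "emitted/supported by any":
//   val x = TLMasterToSlaveTransferSizes(get = TransferSizes(1, 64))
//   val y = TLMasterToSlaveTransferSizes(get = TransferSizes(4, 16))
//   x.intersect(y).get   // TransferSizes(4, 16)
//   x.mincover(y).get    // TransferSizes(1, 64)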
extends SimpleProduct { def sortedAddress = address.sorted override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters] override def productPrefix = "TLSlaveParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 11 def productElement(n: Int): Any = n match { case 0 => name case 1 => address case 2 => resources case 3 => regionType case 4 => executable case 5 => fifoId case 6 => supports case 7 => emits case 8 => alwaysGrantsT case 9 => mayDenyGet case 10 => mayDenyPut case _ => throw new IndexOutOfBoundsException(n.toString) } def supportsAcquireT: TransferSizes = supports.acquireT def supportsAcquireB: TransferSizes = supports.acquireB def supportsArithmetic: TransferSizes = supports.arithmetic def supportsLogical: TransferSizes = supports.logical def supportsGet: TransferSizes = supports.get def supportsPutFull: TransferSizes = supports.putFull def supportsPutPartial: TransferSizes = supports.putPartial def supportsHint: TransferSizes = supports.hint require (!address.isEmpty, "Address cannot be empty") address.foreach { a => require (a.finite, "Address must be finite") } address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)") require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)") require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)") require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)") require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)") require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)") require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT") // Make sure that the regionType agrees with the capabilities require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected") val maxTransfer = List( // Largest supported transfer of all types supportsAcquireT.max, supportsAcquireB.max, supportsArithmetic.max, supportsLogical.max, supportsGet.max, supportsPutFull.max, supportsPutPartial.max).max val maxAddress = address.map(_.max).max val minAlignment = address.map(_.alignment).min // The device had better not support a transfer larger than its alignment require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)") def toResource: ResourceAddress = { ResourceAddress(address, ResourcePermissions( r = supportsAcquireB || supportsGet, w = supportsAcquireT || supportsPutFull, x = executable, c = supportsAcquireB, a = supportsArithmetic && supportsLogical)) } def findTreeViolation() = nodePath.find { case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false case _: SinkNode[_, _, _, _, _] => false case node => node.inputs.size != 1 } def isTree = findTreeViolation() == None def infoString = { s"""Slave Name = ${name} |Slave Address = ${address} |supports = ${supports.infoString} | 
|""".stripMargin } def v1copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { new TLSlaveParameters( setName = setName, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = emits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: Option[String] = setName, address: Seq[AddressSet] = address, regionType: RegionType.T = regionType, executable: Boolean = executable, fifoId: Option[Int] = fifoId, supports: TLMasterToSlaveTransferSizes = supports, emits: TLSlaveToMasterTransferSizes = emits, alwaysGrantsT: Boolean = alwaysGrantsT, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } @deprecated("Use v1copy instead of copy","") def copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { v1copy( address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supportsAcquireT = supportsAcquireT, supportsAcquireB = supportsAcquireB, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } } object TLSlaveParameters { def v1( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), 
supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = { new TLSlaveParameters( setName = None, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLSlaveToMasterTransferSizes.unknownEmits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2( address: Seq[AddressSet], nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Seq(), name: Option[String] = None, regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, fifoId: Option[Int] = None, supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports, emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits, alwaysGrantsT: Boolean = false, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } } object TLManagerParameters { @deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","") def apply( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = TLSlaveParameters.v1( address, resources, regionType, executable, nodePath, supportsAcquireT, supportsAcquireB, supportsArithmetic, supportsLogical, supportsGet, supportsPutFull, supportsPutPartial, supportsHint, mayDenyGet, mayDenyPut, alwaysGrantsT, fifoId, ) } case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int]) { def members = Seq(a, b, c, d) members.collect { case Some(beatBytes) => require (isPow2(beatBytes), "Data channel width must be a power of 2") } } object TLChannelBeatBytes{ def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes( Some(beatBytes), Some(beatBytes), Some(beatBytes), Some(beatBytes)) def apply(): TLChannelBeatBytes = TLChannelBeatBytes( None, None, None, None) } class TLSlavePortParameters private( val slaves: Seq[TLSlaveParameters], 
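// [Editor's note: illustrative sketch, not part of Parameters.scala; the address and sizes are
// made up.] A simple memory-mapped RAM slave described with the v1 factory defined above:
//   val ramParams = TLSlaveParameters.v1(
//     address            = Seq(AddressSet(0x80000000L, 0xffff)),
//     regionType         = RegionType.UNCACHED,
//     supportsGet        = TransferSizes(1, 64),
//     supportsPutFull    = TransferSizes(1, 64),
//     supportsPutPartial = TransferSizes(1, 64),
//     fifoId             = Some(0))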
val channelBytes: TLChannelBeatBytes, val endSinkId: Int, val minLatency: Int, val responseFields: Seq[BundleFieldBase], val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct { def sortedSlaves = slaves.sortBy(_.sortedAddress.head) override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters] override def productPrefix = "TLSlavePortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => slaves case 1 => channelBytes case 2 => endSinkId case 3 => minLatency case 4 => responseFields case 5 => requestKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!slaves.isEmpty, "Slave ports must have slaves") require (endSinkId >= 0, "Sink ids cannot be negative") require (minLatency >= 0, "Minimum required latency cannot be negative") // Using this API implies you cannot handle mixed-width busses def beatBytes = { channelBytes.members.foreach { width => require (width.isDefined && width == channelBytes.a) } channelBytes.a.get } // TODO this should be deprecated def managers = slaves def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = { val relevant = slaves.filter(m => policy(m)) relevant.foreach { m => require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ") } } // Bounds on required sizes def maxAddress = slaves.map(_.maxAddress).max def maxTransfer = slaves.map(_.maxTransfer).max def mayDenyGet = slaves.exists(_.mayDenyGet) def mayDenyPut = slaves.exists(_.mayDenyPut) // Diplomatically determined operation sizes emitted by all outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _) // Operation Emitted by at least one outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _) val allSupportAcquireT = allSupportClaims.acquireT val allSupportAcquireB = allSupportClaims.acquireB val allSupportArithmetic = allSupportClaims.arithmetic val allSupportLogical = allSupportClaims.logical val allSupportGet = allSupportClaims.get val allSupportPutFull = allSupportClaims.putFull val allSupportPutPartial = allSupportClaims.putPartial val allSupportHint = allSupportClaims.hint // Operation supported by at least one outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _) val anySupportAcquireT = !anySupportClaims.acquireT.none val anySupportAcquireB = !anySupportClaims.acquireB.none val anySupportArithmetic = !anySupportClaims.arithmetic.none val anySupportLogical = !anySupportClaims.logical.none val anySupportGet = !anySupportClaims.get.none val anySupportPutFull = !anySupportClaims.putFull.none val anySupportPutPartial = !anySupportClaims.putPartial.none val anySupportHint = !anySupportClaims.hint.none // Supporting Acquire means being routable for GrantAck require ((endSinkId == 0) == !anySupportAcquireB) // These return Option[TLSlaveParameters] for your convenience def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address))) // The safe version will check the 
entire address def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _))) // The fast version assumes the address is valid (you probably want fastProperty instead of this function) def findFast(address: UInt) = { val routingMask = AddressDecoder(slaves.map(_.address)) VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _))) } // Compute the simplest AddressSets that decide a key def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = { val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) => k -> vs.flatMap(_._2) } val reductionMask = AddressDecoder(groups.map(_._2)) groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) } } // Select a property def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D = Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) }) // Note: returns the actual fifoId + 1 or 0 if None def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U) def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B) // Does this Port manage this ID/address? def containsSafe(address: UInt) = findSafe(address).reduce(_ || _) private def addressHelper( // setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity safe: Boolean, // member filters out the sizes being checked based on the opcode being emitted or supported member: TLSlaveParameters => TransferSizes, address: UInt, lgSize: UInt, // range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity range: Option[TransferSizes]): Bool = { // trim reduces circuit complexity by intersecting checked sizes with the range argument def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x) // groupBy returns an unordered map, convert back to Seq and sort the result for determinism // groupByIntoSeq is turning slaves into trimmed membership sizes // We are grouping all the slaves by their transfer size where // if they support the trimmed size then // member is the type of transfer that you are looking for (What you are trying to filter on) // When you consider membership, you are trimming the sizes to only the ones that you care about // you are filtering the slaves based on both whether they support a particular opcode and the size // Grouping the slaves based on the actual transfer size range they support // intersecting the range and checking their membership // FOR SUPPORTCASES instead of returning the list of slaves, // you are returning a map from transfer size to the set of // address sets that are supported for that transfer size // find all the slaves that support a certain type of operation and then group their addresses by the supported size // for every size there could be multiple address ranges // safety is a trade off between checking between all possible addresses vs only the addresses // that are known to have supported sizes // the trade off is 'checking all addresses is a more expensive circuit but will always give you // the right answer even if you give it an illegal address' // the not safe version is a cheaper circuit but if you give it an illegal address then it might produce the wrong answer // fast presumes address legality // This 
groupByIntoSeq deterministically groups all address sets for which a given `member` transfer size applies. // In the resulting Map of cases, the keys are transfer sizes and the values are all address sets which emit or support that size. val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) => k -> vs.flatMap(_.address) } // safe produces a circuit that compares against all possible addresses, // whereas fast presumes that the address is legal but uses an efficient address decoder val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2)) // Simplified creates the most concise possible representation of each cases' address sets based on the mask. val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) } simplified.map { case (s, a) => // s is a size, you are checking for this size either the size of the operation is in s // We return an or-reduction of all the cases, checking whether any contains both the dynamic size and dynamic address on the wire. ((Some(s) == range).B || s.containsLg(lgSize)) && a.map(_.contains(address)).reduce(_||_) }.foldLeft(false.B)(_||_) } def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range) def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range) def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range) def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range) def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range) def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range) def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range) def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range) def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range) def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range) def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range) def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range) def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range) def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range) def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, 
_.supports.putPartial, address, lgSize, range) def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range) def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range) def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range) def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range) def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range) def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range) def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range) def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range) def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption def isTree = !slaves.exists(!_.isTree) def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString def v1copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = managers, channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } def v2copy( slaves: Seq[TLSlaveParameters] = slaves, channelBytes: TLChannelBeatBytes = channelBytes, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = slaves, channelBytes = channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } @deprecated("Use v1copy instead of copy","") def copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { v1copy( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } object TLSlavePortParameters { def v1( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { new TLSlavePortParameters( slaves = managers, channelBytes = TLChannelBeatBytes(beatBytes), endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } } object TLManagerPortParameters { @deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","") def apply( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { 
TLSlavePortParameters.v1( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } class TLMasterParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], val name: String, val visibility: Seq[AddressSet], val unusedRegionTypes: Set[RegionType.T], val executesOnly: Boolean, val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C. val supports: TLSlaveToMasterTransferSizes, val emits: TLMasterToSlaveTransferSizes, val neverReleasesData: Boolean, val sourceId: IdRange) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters] override def productPrefix = "TLMasterParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 10 def productElement(n: Int): Any = n match { case 0 => name case 1 => sourceId case 2 => resources case 3 => visibility case 4 => unusedRegionTypes case 5 => executesOnly case 6 => requestFifo case 7 => supports case 8 => emits case 9 => neverReleasesData case _ => throw new IndexOutOfBoundsException(n.toString) } require (!sourceId.isEmpty) require (!visibility.isEmpty) require (supports.putFull.contains(supports.putPartial)) // We only support these operations if we support Probe (ie: we're a cache) require (supports.probe.contains(supports.arithmetic)) require (supports.probe.contains(supports.logical)) require (supports.probe.contains(supports.get)) require (supports.probe.contains(supports.putFull)) require (supports.probe.contains(supports.putPartial)) require (supports.probe.contains(supports.hint)) visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } val maxTransfer = List( supports.probe.max, supports.arithmetic.max, supports.logical.max, supports.get.max, supports.putFull.max, supports.putPartial.max).max def infoString = { s"""Master Name = ${name} |visibility = ${visibility} |emits = ${emits.infoString} |sourceId = ${sourceId} | |""".stripMargin } def v1copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { new TLMasterParameters( nodePath = nodePath, resources = this.resources, name = name, visibility = visibility, unusedRegionTypes = this.unusedRegionTypes, executesOnly = this.executesOnly, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = this.emits, neverReleasesData = this.neverReleasesData, sourceId = sourceId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: String = name, visibility: Seq[AddressSet] = visibility, unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes, executesOnly: Boolean = executesOnly, requestFifo: Boolean = requestFifo, supports: TLSlaveToMasterTransferSizes = supports, emits: TLMasterToSlaveTransferSizes = emits, neverReleasesData: Boolean = neverReleasesData, sourceId: IdRange = sourceId) = { new 
TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } @deprecated("Use v1copy instead of copy","") def copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { v1copy( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } object TLMasterParameters { def v1( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { new TLMasterParameters( nodePath = nodePath, resources = Nil, name = name, visibility = visibility, unusedRegionTypes = Set(), executesOnly = false, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData = false, sourceId = sourceId) } def v2( nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Nil, name: String, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), unusedRegionTypes: Set[RegionType.T] = Set(), executesOnly: Boolean = false, requestFifo: Boolean = false, supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports, emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData: Boolean = false, sourceId: IdRange = IdRange(0,1)) = { new TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } } object TLClientParameters { @deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","") def apply( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet.everything), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: 
TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { TLMasterParameters.v1( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } class TLMasterPortParameters private( val masters: Seq[TLMasterParameters], val channelBytes: TLChannelBeatBytes, val minLatency: Int, val echoFields: Seq[BundleFieldBase], val requestFields: Seq[BundleFieldBase], val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters] override def productPrefix = "TLMasterPortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => masters case 1 => channelBytes case 2 => minLatency case 3 => echoFields case 4 => requestFields case 5 => responseKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!masters.isEmpty) require (minLatency >= 0) def clients = masters // Require disjoint ranges for Ids IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) => require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}") } // Bounds on required sizes def endSourceId = masters.map(_.sourceId.end).max def maxTransfer = masters.map(_.maxTransfer).max // The unused sources < endSourceId def unusedSources: Seq[Int] = { val usedSources = masters.map(_.sourceId).sortBy(_.start) ((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) => end until start } } // Diplomatically determined operation sizes emitted by all inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = masters.map(_.emits).reduce( _ intersect _) // Diplomatically determined operation sizes Emitted by at least one inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all inward Masters // as opposed to supports* which generate circuitry to check which specific addresses val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _) val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _) val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _) val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _) val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _) val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _) val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _) // Diplomatically determined operation sizes supported by at least one master // as opposed to supports* which generate circuitry to check which specific addresses val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _) val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _) val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _) val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _) val anySupportPutFull = 
masters.map(!_.supports.putFull.none) .reduce(_ || _) val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _) val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _) // These return Option[TLMasterParameters] for your convenience def find(id: Int) = masters.find(_.sourceId.contains(id)) // Synthesizable lookup methods def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id))) def contains(id: UInt) = find(id).reduce(_ || _) def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B)) // Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = { val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _) // this if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version; // the case where there is only one group. if (allSame) member(masters(0)).containsLg(lgSize) else { // Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize Mux1H(find(id), masters.map(member(_).containsLg(lgSize))) } } // Check for support of a given operation at a specific id val supportsProbe = sourceIdHelper(_.supports.probe) _ val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _ val supportsLogical = sourceIdHelper(_.supports.logical) _ val supportsGet = sourceIdHelper(_.supports.get) _ val supportsPutFull = sourceIdHelper(_.supports.putFull) _ val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _ val supportsHint = sourceIdHelper(_.supports.hint) _ // TODO: Merge sourceIdHelper2 with sourceIdHelper private def sourceIdHelper2( member: TLMasterParameters => TransferSizes, sourceId: UInt, lgSize: UInt): Bool = { // Because sourceIds are uniquely owned by each master, we use them to group the // cases that have to be checked. 
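  // Illustrative (hypothetical) example: if one master owns sourceId [0,4) and emits Gets of up
  // to 64 bytes while another owns [4,8) and emits Gets of at most 8 bytes, grouping by the
  // member TransferSizes yields two cases; the expression below then or-reduces
  // "lgSize fits this case's TransferSizes && sourceId falls in one of its ranges" over the cases.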
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) => k -> vs.map(_.sourceId) } emitCases.map { case (s, a) => (s.containsLg(lgSize)) && a.map(_.contains(sourceId)).reduce(_||_) }.foldLeft(false.B)(_||_) } // Check for emit of a given operation at a specific id def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize) def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize) def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize) def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize) def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize) def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize) def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize) def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize) def infoString = masters.map(_.infoString).mkString def v1copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = clients, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2copy( masters: Seq[TLMasterParameters] = masters, channelBytes: TLChannelBeatBytes = channelBytes, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } @deprecated("Use v1copy instead of copy","") def copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { v1copy( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLClientPortParameters { @deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","") def apply( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { TLMasterPortParameters.v1( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLMasterPortParameters { def v1( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = clients, channelBytes = TLChannelBeatBytes(), minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2( masters: Seq[TLMasterParameters], channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(), minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, 
minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } } case class TLBundleParameters( addressBits: Int, dataBits: Int, sourceBits: Int, sinkBits: Int, sizeBits: Int, echoFields: Seq[BundleFieldBase], requestFields: Seq[BundleFieldBase], responseFields: Seq[BundleFieldBase], hasBCE: Boolean) { // Chisel has issues with 0-width wires require (addressBits >= 1) require (dataBits >= 8) require (sourceBits >= 1) require (sinkBits >= 1) require (sizeBits >= 1) require (isPow2(dataBits)) echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") } val addrLoBits = log2Up(dataBits/8) // Used to uniquify bus IP names def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u") def union(x: TLBundleParameters) = TLBundleParameters( max(addressBits, x.addressBits), max(dataBits, x.dataBits), max(sourceBits, x.sourceBits), max(sinkBits, x.sinkBits), max(sizeBits, x.sizeBits), echoFields = BundleField.union(echoFields ++ x.echoFields), requestFields = BundleField.union(requestFields ++ x.requestFields), responseFields = BundleField.union(responseFields ++ x.responseFields), hasBCE || x.hasBCE) } object TLBundleParameters { val emptyBundleParams = TLBundleParameters( addressBits = 1, dataBits = 8, sourceBits = 1, sinkBits = 1, sizeBits = 1, echoFields = Nil, requestFields = Nil, responseFields = Nil, hasBCE = false) def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y)) def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) = new TLBundleParameters( addressBits = log2Up(slave.maxAddress + 1), dataBits = slave.beatBytes * 8, sourceBits = log2Up(master.endSourceId), sinkBits = log2Up(slave.endSinkId), sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1), echoFields = master.echoFields, requestFields = BundleField.accept(master.requestFields, slave.requestKeys), responseFields = BundleField.accept(slave.responseFields, master.responseKeys), hasBCE = master.anySupportProbe && slave.anySupportAcquireB) } case class TLEdgeParameters( master: TLMasterPortParameters, slave: TLSlavePortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { // legacy names: def manager = slave def client = master val maxTransfer = max(master.maxTransfer, slave.maxTransfer) val maxLgSize = log2Ceil(maxTransfer) // Sanity check the link... 
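  // (maxTransfer is the larger of the master- and slave-side maxima; it must cover at least one
  // full beat of the data bus, e.g. a 16-byte-wide port whose masters and slaves only ever
  // transfer 8 bytes is rejected here.)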
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})") def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims) val bundle = TLBundleParameters(master, slave) def formatEdge = master.infoString + "\n" + slave.infoString } case class TLCreditedDelay( a: CreditedDelay, b: CreditedDelay, c: CreditedDelay, d: CreditedDelay, e: CreditedDelay) { def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay( a = a + that.a, b = b + that.b, c = c + that.c, d = d + that.d, e = e + that.e) override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})" } object TLCreditedDelay { def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay) } case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString} case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val delay = client.delay + manager.delay val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters) case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base)) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } // To be unified, devices must agree on all of these terms case class ManagerUnificationKey( resources: Seq[Resource], regionType: RegionType.T, executable: Boolean, supportsAcquireT: TransferSizes, supportsAcquireB: TransferSizes, supportsArithmetic: TransferSizes, supportsLogical: TransferSizes, supportsGet: TransferSizes, supportsPutFull: TransferSizes, supportsPutPartial: TransferSizes, supportsHint: TransferSizes) object ManagerUnificationKey { def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey( resources = x.resources, regionType = x.regionType, executable = x.executable, supportsAcquireT = x.supportsAcquireT, supportsAcquireB = x.supportsAcquireB, supportsArithmetic = x.supportsArithmetic, supportsLogical = x.supportsLogical, supportsGet = x.supportsGet, supportsPutFull = x.supportsPutFull, supportsPutPartial = x.supportsPutPartial, supportsHint = x.supportsHint) } object 
ManagerUnification { def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = { slaves.groupBy(ManagerUnificationKey.apply).values.map { seq => val agree = seq.forall(_.fifoId == seq.head.fifoId) seq(0).v1copy( address = AddressSet.unify(seq.flatMap(_.address)), fifoId = if (agree) seq(0).fifoId else None) }.toList } } case class TLBufferParams( a: BufferParams = BufferParams.none, b: BufferParams = BufferParams.none, c: BufferParams = BufferParams.none, d: BufferParams = BufferParams.none, e: BufferParams = BufferParams.none ) extends DirectedBuffers[TLBufferParams] { def copyIn(x: BufferParams) = this.copy(b = x, d = x) def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x) def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x) } /** Pretty printing of TL source id maps */ class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] { private val tlDigits = String.valueOf(tl.endSourceId-1).length() protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s" private val sorted = tl.masters.sortBy(_.sourceId) val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c => TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo) } } case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean) extends IdMapEntry { val from = tlId val to = tlId val maxTransactionsInFlight = Some(tlId.size) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? 
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => 
a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? 
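  // (Puts and atomics always need T; Gets never do; Hints follow their param, with
  // PREFETCH_WRITE needing T and PREFETCH_READ not; Acquires need T only when requesting
  // write permission, i.e. NtoT or BtoT.)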
def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size 
:= lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = 
Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address 
:= toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def 
Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
    require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
    val legal = client.supportsGet(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Get
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
    Put(fromAddress, toSource, lgSize, data, false.B)

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
    require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
    val legal = client.supportsPutFull(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.PutFullData
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
    Put(fromAddress, toSource, lgSize, data, mask, false.B)

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
    require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsPutPartial(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.PutPartialData
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsArithmetic(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.ArithmeticData
    b.param := atomic
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsLogical(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.LogicalData
    b.param := atomic
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
    require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsHint(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Hint
    b.param := param
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
  def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
  def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
  def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.AccessAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }

  def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
  def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
  def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
  def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.AccessAckData
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := data
    d.corrupt := corrupt
    d
  }

  def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
  def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
  def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
  def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.HintAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }
}
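Each request constructor above returns a (legal, bundle) pair: the Bool reports whether the addressed client can accept an operation of that size, and the bundle comes fully populated so a caller only has to gate valid and connect bits. A minimal usage sketch, assuming an inward edge `edge` with these helpers plus hypothetical ports and wires `b`, `a`, `d`, `wantGet`, `probeAddr`, `clientId`, `lgBeatBytes` (none of these names come from the file above):

  // Hypothetical driver: issue a Get on the B channel only when it is legal.
  val (getLegal, getBits) = edge.Get(fromAddress = probeAddr, toSource = clientId, lgSize = lgBeatBytes)
  b.valid := wantGet && getLegal
  b.bits  := getBits

  // Hypothetical responder: acknowledge an incoming A-channel request on D.
  d.valid := a.valid
  d.bits  := edge.AccessAck(a.bits)
  a.ready := d.ready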
module TLMonitor_84( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [15:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [127:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [127:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [15:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [127:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [6:0] io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [127:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10] wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 
1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; 
// @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [7:0] c_first_beats1_decode = 8'h0; // @[Edges.scala:220:59] wire [7:0] c_first_beats1 = 8'h0; // @[Edges.scala:221:14] wire [7:0] _c_first_count_T = 8'h0; // @[Edges.scala:234:27] wire [7:0] c_first_count = 8'h0; // @[Edges.scala:234:25] wire [7:0] _c_first_counter_T = 8'h0; // @[Edges.scala:236:21] wire [7:0] c_set = 8'h0; // @[Monitor.scala:738:34] wire [7:0] c_set_wo_ready = 8'h0; // @[Monitor.scala:739:34] wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _source_ok_T_4 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:56:48] wire _source_ok_WIRE_0 = 1'h1; // @[Parameters.scala:1138:31] wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67] wire _source_ok_T_10 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:56:48] wire _source_ok_WIRE_1_0 = 1'h1; // @[Parameters.scala:1138:31] wire sink_ok = 1'h1; // @[Monitor.scala:309:31] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [7:0] c_first_counter1 = 8'hFF; // @[Edges.scala:230:28] wire [8:0] _c_first_counter1_T = 9'h1FF; // @[Edges.scala:230:28] wire [2:0] io_in_a_bits_param = 3'h0; // @[Monitor.scala:36:7] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // 
@[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] 
_c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_source = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_source = 3'h0; // @[Bundles.scala:265:61] wire [127:0] _c_first_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_first_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_first_WIRE_2_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_first_WIRE_3_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_set_wo_ready_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_set_wo_ready_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_set_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_set_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_opcodes_set_interm_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_opcodes_set_interm_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_sizes_set_interm_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_sizes_set_interm_WIRE_1_bits_data = 128'h0; // 
@[Bundles.scala:265:61] wire [127:0] _c_opcodes_set_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_opcodes_set_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_sizes_set_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_sizes_set_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_probe_ack_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_probe_ack_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_probe_ack_WIRE_2_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_probe_ack_WIRE_3_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _same_cycle_resp_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _same_cycle_resp_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _same_cycle_resp_WIRE_2_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _same_cycle_resp_WIRE_3_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _same_cycle_resp_WIRE_4_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _same_cycle_resp_WIRE_5_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] c_opcodes_set = 32'h0; // @[Monitor.scala:740:34] wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // 
@[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57] wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57] wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // 
@[Monitor.scala:724:51] wire [67:0] _c_sizes_set_T_1 = 68'h0; // @[Monitor.scala:768:52] wire [5:0] _c_opcodes_set_T = 6'h0; // @[Monitor.scala:767:79] wire [5:0] _c_sizes_set_T = 6'h0; // @[Monitor.scala:768:77] wire [66:0] _c_opcodes_set_T_1 = 67'h0; // @[Monitor.scala:767:54] wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59] wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40] wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [7:0] _c_set_wo_ready_T = 8'h1; // @[OneHot.scala:58:35] wire [7:0] _c_set_T = 8'h1; // @[OneHot.scala:58:35] wire [63:0] c_sizes_set = 64'h0; // @[Monitor.scala:741:34] wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117] wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48] wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119] wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [2:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [2:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [2:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [2:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [2:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [2:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [2:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [2:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [2:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [2:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [2:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [2:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71] wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [26:0] 
_a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [3:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1; // @[OneHot.scala:65:{12,27}] wire [3:0] mask_sizeOH = {_mask_sizeOH_T_2[3:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_sub_0_1 = |(io_in_a_bits_size_0[3:2]); // @[Misc.scala:206:21] wire mask_sub_sub_sub_size = mask_sizeOH[3]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_sub_bit = io_in_a_bits_address_0[3]; // @[Misc.scala:210:26] wire mask_sub_sub_sub_1_2 = mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_sub_nbit = ~mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_sub_0_2 = mask_sub_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_sub_acc_T = mask_sub_sub_sub_size & mask_sub_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_sub_0_1 = mask_sub_sub_sub_sub_0_1 | _mask_sub_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_sub_acc_T_1 = mask_sub_sub_sub_size & mask_sub_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_sub_1_1 = mask_sub_sub_sub_sub_0_1 | _mask_sub_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_sub_0_2 & mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_sub_1_2 = mask_sub_sub_sub_0_2 & mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_sub_2_2 = mask_sub_sub_sub_1_2 & mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T_2 = mask_sub_sub_size & mask_sub_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_2_1 = mask_sub_sub_sub_1_1 | _mask_sub_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_sub_3_2 = mask_sub_sub_sub_1_2 & mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_sub_acc_T_3 = mask_sub_sub_size & mask_sub_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_3_1 = mask_sub_sub_sub_1_1 | _mask_sub_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // 
@[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_sub_4_2 = mask_sub_sub_2_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_4 = mask_sub_size & mask_sub_4_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_4_1 = mask_sub_sub_2_1 | _mask_sub_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_sub_5_2 = mask_sub_sub_2_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_5 = mask_sub_size & mask_sub_5_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_5_1 = mask_sub_sub_2_1 | _mask_sub_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_sub_6_2 = mask_sub_sub_3_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_6 = mask_sub_size & mask_sub_6_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_6_1 = mask_sub_sub_3_1 | _mask_sub_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_sub_7_2 = mask_sub_sub_3_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_7 = mask_sub_size & mask_sub_7_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_7_1 = mask_sub_sub_3_1 | _mask_sub_acc_T_7; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // 
@[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire mask_eq_8 = mask_sub_4_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_8 = mask_size & mask_eq_8; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_8 = mask_sub_4_1 | _mask_acc_T_8; // @[Misc.scala:215:{29,38}] wire mask_eq_9 = mask_sub_4_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_9 = mask_size & mask_eq_9; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_9 = mask_sub_4_1 | _mask_acc_T_9; // @[Misc.scala:215:{29,38}] wire mask_eq_10 = mask_sub_5_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_10 = mask_size & mask_eq_10; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_10 = mask_sub_5_1 | _mask_acc_T_10; // @[Misc.scala:215:{29,38}] wire mask_eq_11 = mask_sub_5_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_11 = mask_size & mask_eq_11; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_11 = mask_sub_5_1 | _mask_acc_T_11; // @[Misc.scala:215:{29,38}] wire mask_eq_12 = mask_sub_6_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_12 = mask_size & mask_eq_12; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_12 = mask_sub_6_1 | _mask_acc_T_12; // @[Misc.scala:215:{29,38}] wire mask_eq_13 = mask_sub_6_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_13 = mask_size & mask_eq_13; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_13 = mask_sub_6_1 | _mask_acc_T_13; // @[Misc.scala:215:{29,38}] wire mask_eq_14 = mask_sub_7_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_14 = mask_size & mask_eq_14; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_14 = mask_sub_7_1 | _mask_acc_T_14; // @[Misc.scala:215:{29,38}] wire mask_eq_15 = mask_sub_7_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_15 = mask_size & mask_eq_15; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_15 = mask_sub_7_1 | _mask_acc_T_15; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo_lo = {mask_lo_lo_hi, mask_lo_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_lo_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo_hi = {mask_lo_hi_hi, mask_lo_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo_lo = {mask_acc_9, mask_acc_8}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_lo_hi = {mask_acc_11, mask_acc_10}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi_lo = {mask_hi_lo_hi, mask_hi_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_hi_lo = {mask_acc_13, 
mask_acc_12}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi_hi = {mask_acc_15, mask_acc_14}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi_hi = {mask_hi_hi_hi, mask_hi_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [15:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [2:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}] wire [2:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire _T_1267 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1267; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1267; // @[Decoupled.scala:51:35] wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [7:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:4]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [7:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 8'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [7:0] a_first_counter; // @[Edges.scala:229:27] wire [8:0] _a_first_counter1_T = {1'h0, a_first_counter} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] a_first_counter1 = _a_first_counter1_T[7:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 8'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 8'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 8'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [7:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [7:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _a_first_counter_T = a_first ? 
a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [3:0] size; // @[Monitor.scala:389:22] reg [2:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _T_1340 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1340; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1340; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1340; // @[Decoupled.scala:51:35] wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [7:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:4]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [7:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 8'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [7:0] d_first_counter; // @[Edges.scala:229:27] wire [8:0] _d_first_counter1_T = {1'h0, d_first_counter} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] d_first_counter1 = _d_first_counter1_T[7:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 8'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 8'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 8'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [7:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [7:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _d_first_counter_T = d_first ? 
d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg [2:0] source_1; // @[Monitor.scala:541:22] reg [6:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [7:0] inflight; // @[Monitor.scala:614:27] reg [31:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [63:0] inflight_sizes; // @[Monitor.scala:618:33] wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [7:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:4]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [7:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 8'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [7:0] a_first_counter_1; // @[Edges.scala:229:27] wire [8:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] a_first_counter1_1 = _a_first_counter1_T_1[7:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 8'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 8'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 8'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [7:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [7:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [7:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:4]; // @[package.scala:243:46] wire [7:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 8'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [7:0] d_first_counter_1; // @[Edges.scala:229:27] wire [8:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] d_first_counter1_1 = _d_first_counter1_T_1[7:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 8'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 8'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 8'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [7:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [7:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _d_first_counter_T_1 = d_first_1 ? 
d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [7:0] a_set; // @[Monitor.scala:626:34] wire [7:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [31:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [63:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [5:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [5:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [5:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [5:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [5:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [31:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [31:0] _a_opcode_lookup_T_6 = {28'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [31:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[31:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [7:0] a_size_lookup; // @[Monitor.scala:639:33] wire [5:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65] wire [5:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65] wire [5:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99] wire [5:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67] wire [5:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99] wire [63:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [63:0] _a_size_lookup_T_6 = {56'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}] wire [63:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[63:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [7:0] _GEN_3 = {5'h0, io_in_a_bits_source_0}; // @[OneHot.scala:58:35] wire [7:0] _GEN_4 = 8'h1 << _GEN_3; // @[OneHot.scala:58:35] wire [7:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_4; // @[OneHot.scala:58:35] wire [7:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_4; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T : 8'h0; // @[OneHot.scala:58:35] wire _T_1193 = _T_1267 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1193 ? _a_set_T : 8'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1193 ? 
_a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1193 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [5:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [66:0] _a_opcodes_set_T_1 = {63'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1193 ? _a_opcodes_set_T_1[31:0] : 32'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [5:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77] wire [67:0] _a_sizes_set_T_1 = {63'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1193 ? _a_sizes_set_T_1[63:0] : 64'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [7:0] d_clr; // @[Monitor.scala:664:34] wire [7:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [31:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [63:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_5 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_5; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_5; // @[Monitor.scala:673:46, :783:46] wire _T_1239 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [7:0] _GEN_6 = {5'h0, io_in_d_bits_source_0}; // @[OneHot.scala:58:35] wire [7:0] _GEN_7 = 8'h1 << _GEN_6; // @[OneHot.scala:58:35] wire [7:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_7; // @[OneHot.scala:58:35] wire [7:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_7; // @[OneHot.scala:58:35] wire [7:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_7; // @[OneHot.scala:58:35] wire [7:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_7; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1239 & ~d_release_ack ? _d_clr_wo_ready_T : 8'h0; // @[OneHot.scala:58:35] wire _T_1208 = _T_1340 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1208 ? _d_clr_T : 8'h0; // @[OneHot.scala:58:35] wire [78:0] _d_opcodes_clr_T_5 = 79'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1208 ? _d_opcodes_clr_T_5[31:0] : 32'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [78:0] _d_sizes_clr_T_5 = 79'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1208 ? 
_d_sizes_clr_T_5[63:0] : 64'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [7:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [7:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [7:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [31:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [31:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [31:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [63:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [63:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [63:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [7:0] inflight_1; // @[Monitor.scala:726:35] wire [7:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [31:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [31:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [63:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [63:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [7:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:4]; // @[package.scala:243:46] wire [7:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 8'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [7:0] d_first_counter_2; // @[Edges.scala:229:27] wire [8:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] d_first_counter1_2 = _d_first_counter1_T_2[7:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 8'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 8'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 8'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [7:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [7:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [7:0] c_size_lookup; // @[Monitor.scala:748:35] wire [31:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [31:0] _c_opcode_lookup_T_6 = {28'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [31:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[31:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [63:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [63:0] _c_size_lookup_T_6 = {56'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}] wire [63:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[63:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [7:0] d_clr_1; // @[Monitor.scala:774:34] wire [7:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [31:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [63:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1311 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1311 & d_release_ack_1 ? _d_clr_wo_ready_T_1 : 8'h0; // @[OneHot.scala:58:35] wire _T_1293 = _T_1340 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1293 ? _d_clr_T_1 : 8'h0; // @[OneHot.scala:58:35] wire [78:0] _d_opcodes_clr_T_11 = 79'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1293 ? _d_opcodes_clr_T_11[31:0] : 32'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [78:0] _d_sizes_clr_T_11 = 79'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1293 ? _d_sizes_clr_T_11[63:0] : 64'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 3'h0; // @[Monitor.scala:36:7, :795:113] wire [7:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [7:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [31:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [31:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [63:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [63:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
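The generated wires above (a_set / a_opcodes_set / a_sizes_set, d_clr / d_opcodes_clr / d_sizes_clr, the inflight registers and the watchdog) all implement the monitor's per-source bookkeeping: set a one-hot bit when an 'A' request is accepted, clear it when the matching 'D' response retires, and time out if anything stays outstanding too long. As an orientation aid, a minimal standalone Chisel sketch of that pattern (not the generated module; names, widths and the timeout constant are illustrative) is:

import chisel3._
import chisel3.util._

class SourceTrackerSketch(nSources: Int) extends Module {
  require(nSources > 1)
  val io = IO(new Bundle {
    val a_fire   = Input(Bool())                      // 'A' request accepted this cycle
    val a_source = Input(UInt(log2Ceil(nSources).W))
    val d_fire   = Input(Bool())                      // 'D' response accepted this cycle
    val d_source = Input(UInt(log2Ceil(nSources).W))
  })

  val inflight = RegInit(0.U(nSources.W))
  val a_set = Mux(io.a_fire, UIntToOH(io.a_source, nSources), 0.U(nSources.W))
  val d_clr = Mux(io.d_fire, UIntToOH(io.d_source, nSources), 0.U(nSources.W))

  // A source ID may not be reused while still outstanding; a response must
  // match something outstanding (or a request accepted in the same cycle).
  assert(!(io.a_fire && inflight(io.a_source)), "'A' channel re-used a source ID")
  val same_cycle_resp = io.a_fire && io.a_source === io.d_source
  assert(!(io.d_fire && !inflight(io.d_source) && !same_cycle_resp),
    "'D' channel acknowledged for nothing inflight")

  inflight := (inflight | a_set) & ~d_clr

  // Watchdog: counts while anything is outstanding, cleared by any traffic.
  // The real monitor reads the limit from a plusarg; 100000 here is illustrative.
  val watchdog = RegInit(0.U(32.W))
  watchdog := Mux(io.a_fire || io.d_fire, 0.U, watchdog + 1.U)
  assert(!inflight.orR || watchdog < 100000.U, "TileLink timeout expired")
}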
Generate the Verilog code corresponding to the following Chisel files. File HellaInterface.scala: package saturn.rocket import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util._ import freechips.rocketchip.tile._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.diplomacy._ import saturn.common._ import saturn.mem.{VectorMemIO} class HellaCacheInterface(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { val io = IO(new Bundle { val status = Input(new MStatus) val dmem = new HellaCacheIO val vec = Flipped(new VectorMemIO) val vec_busy = Input(Bool()) val mem_busy = Output(Bool()) }) val hella_simple = Module(new SimpleHellaCacheIF) val hella_arb = Module(new HellaCacheArbiter(2)) hella_simple.io.requestor <> hella_arb.io.mem io.dmem <> hella_simple.io.cache val hella_load = hella_arb.io.requestor(1) val hella_store = hella_arb.io.requestor(0) val hella_load_q = Module(new Queue(new HellaCacheReq, 2)) hella_load.req <> hella_load_q.io.deq val hella_store_q = Module(new Queue(new HellaCacheReq, 2)) hella_store.req <> hella_store_q.io.deq hella_arb.io.requestor.foreach { h => h.s1_kill := false.B h.s1_data := DontCare h.s2_kill := false.B h.keep_clock_enabled := io.vec_busy } val inflights = RegInit(0.U((1+dmemTagBits).W)) io.vec.load_req.ready := hella_load_q.io.enq.ready hella_load_q.io.enq.valid := io.vec.load_req.valid hella_load_q.io.enq.bits.addr := io.vec.load_req.bits.addr hella_load_q.io.enq.bits.size := log2Ceil(dLenB).U hella_load_q.io.enq.bits.tag := Cat(0.U, io.vec.load_req.bits.tag) hella_load_q.io.enq.bits.cmd := M_XRD hella_load_q.io.enq.bits.signed := false.B hella_load_q.io.enq.bits.dprv := io.status.prv hella_load_q.io.enq.bits.dv := io.status.dv hella_load_q.io.enq.bits.data := DontCare hella_load_q.io.enq.bits.mask := DontCare hella_load_q.io.enq.bits.phys := false.B hella_load_q.io.enq.bits.no_resp := false.B hella_load_q.io.enq.bits.no_alloc := false.B hella_load_q.io.enq.bits.no_xcpt := true.B io.vec.load_resp.valid := hella_load.resp.valid io.vec.load_resp.bits.data := hella_load.resp.bits.data_raw io.vec.load_resp.bits.tag := hella_load.resp.bits.tag io.vec.store_req.ready := hella_store_q.io.enq.ready hella_store_q.io.enq.valid := io.vec.store_req.valid hella_store_q.io.enq.bits.addr := io.vec.store_req.bits.addr hella_store_q.io.enq.bits.tag := Cat(1.U, io.vec.store_req.bits.tag) hella_store_q.io.enq.bits.cmd := M_PWR hella_store_q.io.enq.bits.size := log2Ceil(dLenB).U hella_store_q.io.enq.bits.signed := false.B hella_store_q.io.enq.bits.dprv := io.status.prv hella_store_q.io.enq.bits.dv := io.status.dv hella_store_q.io.enq.bits.data := io.vec.store_req.bits.data hella_store_q.io.enq.bits.mask := io.vec.store_req.bits.mask hella_store_q.io.enq.bits.phys := false.B hella_store_q.io.enq.bits.no_resp := false.B hella_store_q.io.enq.bits.no_alloc := false.B hella_store_q.io.enq.bits.no_xcpt := true.B io.vec.store_ack.valid := hella_store.resp.valid io.vec.store_ack.bits.data := DontCare io.vec.store_ack.bits.tag := hella_store.resp.bits.tag io.mem_busy := inflights =/= 0.U val load_enq = hella_load_q.io.enq.fire val store_enq = hella_store_q.io.enq.fire val load_deq = hella_load.resp.fire val store_deq = hella_store.resp.fire when (load_enq || store_enq || load_deq || store_deq) { inflights := inflights + (load_enq +& store_enq) - (load_deq +& store_deq) } }
module HellaCacheInterface( // @[HellaInterface.scala:14:7] input clock, // @[HellaInterface.scala:14:7] input reset, // @[HellaInterface.scala:14:7] input io_status_dv, // @[HellaInterface.scala:15:14] input [1:0] io_status_prv, // @[HellaInterface.scala:15:14] input io_dmem_req_ready, // @[HellaInterface.scala:15:14] output io_dmem_req_valid, // @[HellaInterface.scala:15:14] output [39:0] io_dmem_req_bits_addr, // @[HellaInterface.scala:15:14] output [7:0] io_dmem_req_bits_tag, // @[HellaInterface.scala:15:14] output [4:0] io_dmem_req_bits_cmd, // @[HellaInterface.scala:15:14] output [2:0] io_dmem_req_bits_size, // @[HellaInterface.scala:15:14] output io_dmem_req_bits_signed, // @[HellaInterface.scala:15:14] output [1:0] io_dmem_req_bits_dprv, // @[HellaInterface.scala:15:14] output io_dmem_req_bits_dv, // @[HellaInterface.scala:15:14] output io_dmem_req_bits_phys, // @[HellaInterface.scala:15:14] output io_dmem_req_bits_no_alloc, // @[HellaInterface.scala:15:14] output io_dmem_req_bits_no_xcpt, // @[HellaInterface.scala:15:14] output [15:0] io_dmem_req_bits_mask, // @[HellaInterface.scala:15:14] output [127:0] io_dmem_s1_data_data, // @[HellaInterface.scala:15:14] output [15:0] io_dmem_s1_data_mask, // @[HellaInterface.scala:15:14] input io_dmem_s2_nack, // @[HellaInterface.scala:15:14] input io_dmem_resp_valid, // @[HellaInterface.scala:15:14] input [7:0] io_dmem_resp_bits_tag, // @[HellaInterface.scala:15:14] input [127:0] io_dmem_resp_bits_data_raw, // @[HellaInterface.scala:15:14] input io_dmem_s2_xcpt_ma_ld, // @[HellaInterface.scala:15:14] input io_dmem_s2_xcpt_ma_st, // @[HellaInterface.scala:15:14] input io_dmem_s2_xcpt_pf_ld, // @[HellaInterface.scala:15:14] input io_dmem_s2_xcpt_pf_st, // @[HellaInterface.scala:15:14] input io_dmem_s2_xcpt_ae_ld, // @[HellaInterface.scala:15:14] input io_dmem_s2_xcpt_ae_st, // @[HellaInterface.scala:15:14] output io_vec_load_req_ready, // @[HellaInterface.scala:15:14] input [39:0] io_vec_load_req_bits_addr, // @[HellaInterface.scala:15:14] input [3:0] io_vec_load_req_bits_tag, // @[HellaInterface.scala:15:14] output io_vec_load_resp_valid, // @[HellaInterface.scala:15:14] output [127:0] io_vec_load_resp_bits_data, // @[HellaInterface.scala:15:14] output [3:0] io_vec_load_resp_bits_tag, // @[HellaInterface.scala:15:14] output io_vec_store_req_ready, // @[HellaInterface.scala:15:14] input [39:0] io_vec_store_req_bits_addr, // @[HellaInterface.scala:15:14] input [127:0] io_vec_store_req_bits_data, // @[HellaInterface.scala:15:14] input [15:0] io_vec_store_req_bits_mask, // @[HellaInterface.scala:15:14] input [3:0] io_vec_store_req_bits_tag, // @[HellaInterface.scala:15:14] output io_vec_store_ack_valid, // @[HellaInterface.scala:15:14] output [3:0] io_vec_store_ack_bits_tag, // @[HellaInterface.scala:15:14] output io_mem_busy // @[HellaInterface.scala:15:14] ); wire _hella_store_q_io_deq_valid; // @[HellaInterface.scala:33:29] wire [39:0] _hella_store_q_io_deq_bits_addr; // @[HellaInterface.scala:33:29] wire [7:0] _hella_store_q_io_deq_bits_tag; // @[HellaInterface.scala:33:29] wire [4:0] _hella_store_q_io_deq_bits_cmd; // @[HellaInterface.scala:33:29] wire [2:0] _hella_store_q_io_deq_bits_size; // @[HellaInterface.scala:33:29] wire _hella_store_q_io_deq_bits_signed; // @[HellaInterface.scala:33:29] wire [1:0] _hella_store_q_io_deq_bits_dprv; // @[HellaInterface.scala:33:29] wire _hella_store_q_io_deq_bits_dv; // @[HellaInterface.scala:33:29] wire _hella_store_q_io_deq_bits_phys; // @[HellaInterface.scala:33:29] wire 
_hella_store_q_io_deq_bits_no_alloc; // @[HellaInterface.scala:33:29] wire _hella_store_q_io_deq_bits_no_xcpt; // @[HellaInterface.scala:33:29] wire [127:0] _hella_store_q_io_deq_bits_data; // @[HellaInterface.scala:33:29] wire [15:0] _hella_store_q_io_deq_bits_mask; // @[HellaInterface.scala:33:29] wire _hella_load_q_io_deq_valid; // @[HellaInterface.scala:31:28] wire [39:0] _hella_load_q_io_deq_bits_addr; // @[HellaInterface.scala:31:28] wire [7:0] _hella_load_q_io_deq_bits_tag; // @[HellaInterface.scala:31:28] wire [4:0] _hella_load_q_io_deq_bits_cmd; // @[HellaInterface.scala:31:28] wire [2:0] _hella_load_q_io_deq_bits_size; // @[HellaInterface.scala:31:28] wire _hella_load_q_io_deq_bits_signed; // @[HellaInterface.scala:31:28] wire [1:0] _hella_load_q_io_deq_bits_dprv; // @[HellaInterface.scala:31:28] wire _hella_load_q_io_deq_bits_dv; // @[HellaInterface.scala:31:28] wire _hella_load_q_io_deq_bits_phys; // @[HellaInterface.scala:31:28] wire _hella_load_q_io_deq_bits_no_alloc; // @[HellaInterface.scala:31:28] wire _hella_load_q_io_deq_bits_no_xcpt; // @[HellaInterface.scala:31:28] wire [127:0] _hella_load_q_io_deq_bits_data; // @[HellaInterface.scala:31:28] wire [15:0] _hella_load_q_io_deq_bits_mask; // @[HellaInterface.scala:31:28] wire _hella_arb_io_requestor_0_req_ready; // @[HellaInterface.scala:24:25] wire _hella_arb_io_requestor_0_resp_valid; // @[HellaInterface.scala:24:25] wire [7:0] _hella_arb_io_requestor_0_resp_bits_tag; // @[HellaInterface.scala:24:25] wire _hella_arb_io_requestor_1_req_ready; // @[HellaInterface.scala:24:25] wire _hella_arb_io_requestor_1_resp_valid; // @[HellaInterface.scala:24:25] wire [7:0] _hella_arb_io_requestor_1_resp_bits_tag; // @[HellaInterface.scala:24:25] wire _hella_arb_io_mem_req_valid; // @[HellaInterface.scala:24:25] wire [39:0] _hella_arb_io_mem_req_bits_addr; // @[HellaInterface.scala:24:25] wire [7:0] _hella_arb_io_mem_req_bits_tag; // @[HellaInterface.scala:24:25] wire [4:0] _hella_arb_io_mem_req_bits_cmd; // @[HellaInterface.scala:24:25] wire [2:0] _hella_arb_io_mem_req_bits_size; // @[HellaInterface.scala:24:25] wire _hella_arb_io_mem_req_bits_signed; // @[HellaInterface.scala:24:25] wire [1:0] _hella_arb_io_mem_req_bits_dprv; // @[HellaInterface.scala:24:25] wire _hella_arb_io_mem_req_bits_dv; // @[HellaInterface.scala:24:25] wire _hella_arb_io_mem_req_bits_phys; // @[HellaInterface.scala:24:25] wire _hella_arb_io_mem_req_bits_no_alloc; // @[HellaInterface.scala:24:25] wire _hella_arb_io_mem_req_bits_no_xcpt; // @[HellaInterface.scala:24:25] wire [127:0] _hella_arb_io_mem_req_bits_data; // @[HellaInterface.scala:24:25] wire [15:0] _hella_arb_io_mem_req_bits_mask; // @[HellaInterface.scala:24:25] wire _hella_simple_io_requestor_req_ready; // @[HellaInterface.scala:23:28] wire _hella_simple_io_requestor_resp_valid; // @[HellaInterface.scala:23:28] wire [7:0] _hella_simple_io_requestor_resp_bits_tag; // @[HellaInterface.scala:23:28] wire [127:0] _hella_simple_io_requestor_resp_bits_data_raw; // @[HellaInterface.scala:23:28] reg [4:0] inflights; // @[HellaInterface.scala:43:26] always @(posedge clock) begin // @[HellaInterface.scala:14:7] if (reset) // @[HellaInterface.scala:14:7] inflights <= 5'h0; // @[HellaInterface.scala:43:26] else if (_hella_arb_io_requestor_1_resp_valid | _hella_arb_io_requestor_0_resp_valid) // @[HellaInterface.scala:24:25, :91:43] inflights <= inflights - {3'h0, {1'h0, _hella_arb_io_requestor_1_resp_valid} + {1'h0, _hella_arb_io_requestor_0_resp_valid}}; // @[HellaInterface.scala:14:7, :24:25, :43:26, 
:92:{54,66}]
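The HellaCacheInterface above tracks outstanding memory operations with a single counter rather than per-source state: up to two enqueues (a load and a store) and two responses can occur in the same cycle, so the increments are formed with widening adds (+&) before updating. A trimmed-down sketch of just that counter, with tagBits standing in for dmemTagBits, is:

import chisel3._
import chisel3.util._

class InflightCounterSketch(tagBits: Int) extends Module {
  val io = IO(new Bundle {
    val load_enq  = Input(Bool())   // load request enqueued
    val store_enq = Input(Bool())   // store request enqueued
    val load_deq  = Input(Bool())   // load response received
    val store_deq = Input(Bool())   // store response received
    val busy      = Output(Bool())
  })

  // One bit wider than the tag space, mirroring RegInit(0.U((1+dmemTagBits).W)).
  val inflights = RegInit(0.U((1 + tagBits).W))

  when (io.load_enq || io.store_enq || io.load_deq || io.store_deq) {
    inflights := inflights + (io.load_enq +& io.store_enq) - (io.load_deq +& io.store_deq)
  }
  io.busy := inflights =/= 0.U
}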
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: package constellation.channel import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy._ import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.util._ import constellation.noc.{HasNoCParams} class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams { val io = IO(new Bundle { val in = Input(new Channel(cParam)) }) val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B })) for (i <- 0 until cParam.srcSpeedup) { val flit = io.in.flit(i) when (flit.valid) { when (flit.bits.head) { in_flight(flit.bits.virt_channel_id) := true.B assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken") } when (flit.bits.tail) { in_flight(flit.bits.virt_channel_id) := false.B } } val possibleFlows = cParam.possibleFlows when (flit.valid && flit.bits.head) { cParam match { case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) => assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } } } } File Types.scala: package constellation.routing import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Parameters} import constellation.noc.{HasNoCParams} import constellation.channel.{Flit} /** A representation for 1 specific virtual channel in wormhole routing * * @param src the source node * @param vc ID for the virtual channel * @param dst the destination node * @param n_vc the number of virtual channels */ // BEGIN: ChannelRoutingInfo case class ChannelRoutingInfo( src: Int, dst: Int, vc: Int, n_vc: Int ) { // END: ChannelRoutingInfo require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this") require (!(src == -1 && dst == -1), s"Illegal $this") require (vc < n_vc, s"Illegal $this") val isIngress = src == -1 val isEgress = dst == -1 } /** Represents the properties of a packet that are relevant for routing * ingressId and egressId uniquely identify a flow, but vnet and dst are used here * to simplify the implementation of routingrelations * * @param ingressId packet's source ingress point * @param egressId packet's destination egress point * @param vNet virtual subnetwork identifier * @param dst packet's destination node ID */ // BEGIN: FlowRoutingInfo case class FlowRoutingInfo( ingressId: Int, egressId: Int, vNetId: Int, ingressNode: Int, ingressNodeId: Int, egressNode: Int, egressNodeId: Int, fifo: Boolean ) { // END: FlowRoutingInfo def isFlow(f: FlowRoutingBundle): Bool = { (f.ingress_node === ingressNode.U && f.egress_node === egressNode.U && f.ingress_node_id === ingressNodeId.U && f.egress_node_id === egressNodeId.U) } def asLiteral(b: FlowRoutingBundle): BigInt = { Seq( (vNetId , b.vnet_id), (ingressNode , b.ingress_node), (ingressNodeId , b.ingress_node_id), (egressNode , b.egress_node), (egressNodeId , b.egress_node_id) ).foldLeft(0)((l, t) => { (l << t._2.getWidth) | t._1 }) } } class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams { // Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination // This simplifies the routing tables val vnet_id = UInt(log2Ceil(nVirtualNetworks).W) val ingress_node = UInt(log2Ceil(nNodes).W) val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W) val egress_node = UInt(log2Ceil(nNodes).W) val 
egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W) }
module NoCMonitor_48( // @[Monitor.scala:11:7] input clock, // @[Monitor.scala:11:7] input reset, // @[Monitor.scala:11:7] input io_in_flit_0_valid, // @[Monitor.scala:12:14] input io_in_flit_0_bits_head, // @[Monitor.scala:12:14] input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14] input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14] input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14] input [3:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14] input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14] input io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14] ); reg in_flight_0; // @[Monitor.scala:16:26] reg in_flight_1; // @[Monitor.scala:16:26]
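NoCMonitor_48 above is one elaboration of the NoCMonitor class from Monitor.scala: each virtual channel gets an in_flight bit that is set by a head flit, cleared by a tail flit, and checked against double-heads. A simplified standalone sketch of that per-VC sequencing check (assumed port names, flow-legality assertions omitted) is:

import chisel3._
import chisel3.util._

class VcSequenceCheckSketch(nVirtualChannels: Int) extends Module {
  val io = IO(new Bundle {
    val valid = Input(Bool())
    val head  = Input(Bool())
    val tail  = Input(Bool())
    val vc_id = Input(UInt((1 max log2Ceil(nVirtualChannels)).W))
  })

  val in_flight = RegInit(VecInit(Seq.fill(nVirtualChannels)(false.B)))

  when (io.valid) {
    when (io.head) {
      in_flight(io.vc_id) := true.B
      // Fires if a second head arrives before the previous packet's tail.
      assert(!in_flight(io.vc_id), "Flit head/tail sequencing is broken")
    }
    // A single-flit packet has head && tail; the later write wins, clearing the bit.
    when (io.tail) { in_flight(io.vc_id) := false.B }
  }
}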
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_55( // @[SynchronizerReg.scala:68:19] input clock, // @[SynchronizerReg.scala:68:19] input reset, // @[SynchronizerReg.scala:68:19] output io_q // @[ShiftReg.scala:36:14] ); wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19] wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19] wire io_q_0; // @[SynchronizerReg.scala:68:19] reg sync_0; // @[SynchronizerReg.scala:51:87] assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19] reg sync_1; // @[SynchronizerReg.scala:51:87] reg sync_2; // @[SynchronizerReg.scala:51:87] always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19] if (reset) begin // @[SynchronizerReg.scala:68:19] sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87] end else begin // @[SynchronizerReg.scala:68:19] sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87] sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87] sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19] end always @(posedge, posedge)
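The module above is the sync=3, init=0 asynchronous-reset primitive that AsyncResetSynchronizerShiftReg instantiates per input bit (its desiredName yields AsyncResetSynchronizerPrimitiveShiftReg_d3_i0). A hedged usage sketch, crossing a single status bit into the local clock domain with the companion-object apply quoted above (the signal and instance names here are illustrative):

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class StatusSyncSketch extends Module {
  val io = IO(new Bundle {
    val async_status = Input(Bool())   // driven from another clock domain
    val status_sync  = Output(Bool())  // synchronized into this clock domain
  })

  // Three flops deep, reset value 0; the optional name lets backend flows
  // find and constrain the synchronizer chain.
  io.status_sync := AsyncResetSynchronizerShiftReg(io.async_status,
    sync = 3, init = 0, name = Some("status_sync"))
}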
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
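// `inflight` is a per-source bitmap of outstanding A=>D transactions, set and cleared by the
// one-hot a_set / d_clr masks above. `inflight_opcodes` and `inflight_sizes` (updated next)
// pack one field per source ID: each opcode field is 4 bits holding {a.opcode, 1'b1}, and each
// size field holds {a.size, 1'b1} in a slot aligned to the next power of two of (sizeBits + 1).
// The low "set" bit lets an all-zero field mean "no request outstanding"; a_opcode_lookup and
// a_size_lookup reverse the encoding by shifting down by the D-channel source times the field
// stride, masking off one field, and dropping the set bit.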
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
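// (AccessAck, HintAck, Grant, and ReleaseAck carry no payload.)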
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
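// inFlight (below) returns (flight, next_flight): a running count of outstanding messages,
// incremented on the first beat of every request and decremented on the last beat of every
// response, across the A/D channels (plus B/C/E when the edge supports Acquire/Probe).
// A minimal usage sketch, with hypothetical names not taken from this file:
//   val (cur, nxt) = edge.inFlight(tlBundle)
//   assert(cur <= 16.U, "too many TileLink messages in flight")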
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
    c.data := DontCare
    c.corrupt := false.B
    (legal, c)
  }

  def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.ReleaseData
    c.param := shrinkPermissions
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := data
    c.corrupt := corrupt
    (legal, c)
  }

  def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
    Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)

  def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
    ProbeAck(b.source, b.address, b.size, reportPermissions)

  def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.ProbeAck
    c.param := reportPermissions
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := DontCare
    c.corrupt := false.B
    c
  }

  def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
    ProbeAck(b.source, b.address, b.size, reportPermissions, data)

  def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.ProbeAckData
    c.param := reportPermissions
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := data
    c.corrupt := corrupt
    c
  }

  def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
    ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)

  def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
  def GrantAck(toSink: UInt): TLBundleE = {
    val e = Wire(new TLBundleE(bundle))
    e.sink := toSink
    e
  }

  // Accesses
  def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
    require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsGetFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.Get
    a.param := 0.U
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
    Put(fromSource, toAddress, lgSize, data, false.B)

  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
    require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsPutFullFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.PutFullData
    a.param := 0.U
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := data
    a.corrupt := corrupt
    (legal, a)
  }

  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt,
          mask: UInt): (Bool, TLBundleA) =
    Put(fromSource, toAddress, lgSize, data, mask, false.B)

  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
    require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsPutPartialFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.PutPartialData
    a.param := 0.U
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask
    a.data := data
    a.corrupt := corrupt
    (legal, a)
  }

  def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
    require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsArithmeticFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.ArithmeticData
    a.param := atomic
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := data
    a.corrupt := corrupt
    (legal, a)
  }

  def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsLogicalFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.LogicalData
    a.param := atomic
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := data
    a.corrupt := corrupt
    (legal, a)
  }

  def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
    require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsHintFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.Hint
    a.param := param
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
  def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.AccessAck
    c.param := 0.U
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := DontCare
    c.corrupt := false.B
    c
  }

  def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
  def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
  def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
  def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.AccessAckData
    c.param := 0.U
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user :=
      DontCare
    c.echo := DontCare
    c.data := data
    c.corrupt := corrupt
    c
  }

  def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
  def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.HintAck
    c.param := 0.U
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := DontCare
    c.corrupt := false.B
    c
  }
}

class TLEdgeIn(
  client:  TLClientPortParameters,
  manager: TLManagerPortParameters,
  params:  Parameters,
  sourceInfo: SourceInfo)
  extends TLEdge(client, manager, params, sourceInfo)
{
  private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
    val todo = x.filter(!_.isEmpty)
    val heads = todo.map(_.head)
    val tails = todo.map(_.tail)
    if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
  }

  // Transfers
  def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
    require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
    val legal = client.supportsProbe(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Probe
    b.param := capPermissions
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD =
    Grant(fromSink, toSource, lgSize, capPermissions, false.B)

  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.Grant
    d.param := capPermissions
    d.size := lgSize
    d.source := toSource
    d.sink := fromSink
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }

  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD =
    Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)

  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.GrantData
    d.param := capPermissions
    d.size := lgSize
    d.source := toSource
    d.sink := fromSink
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := data
    d.corrupt := corrupt
    d
  }

  def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
  def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.ReleaseAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }

  // Accesses
  def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
    require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
    val legal = client.supportsGet(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Get
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
    Put(fromAddress, toSource, lgSize, data, false.B)

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt:
          Bool): (Bool, TLBundleB) = {
    require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
    val legal = client.supportsPutFull(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.PutFullData
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
    Put(fromAddress, toSource, lgSize, data, mask, false.B)

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
    require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsPutPartial(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.PutPartialData
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsArithmetic(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.ArithmeticData
    b.param := atomic
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsLogical(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.LogicalData
    b.param := atomic
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
    require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsHint(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Hint
    b.param := param
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
  def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
  def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
  def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.AccessAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }

  def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size,
    data)
  def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
  def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
  def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.AccessAckData
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := data
    d.corrupt := corrupt
    d
  }

  def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
  def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
  def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
  def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.HintAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }
}
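For orientation, a minimal usage sketch of the channel-A helpers defined in TLEdgeOut above (not part of the original files). The identifiers `edge`, `out`, `wantRead` and `addr` are assumptions for illustration, standing in for the bundle/edge pair a Diplomacy client module obtains via something like `val (out, edge) = node.out(0)`.

// Hypothetical client-side driver: issue a 4-byte Get and always accept D-channel responses.
// Only edge.Get's signature comes from the code above; every other name here is assumed.
val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 2.U)
out.a.valid := wantRead && legal
out.a.bits  := get
out.d.ready := true.B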
module TLMonitor_29( // @[Monitor.scala:36:7]
  input         clock, // @[Monitor.scala:36:7]
  input         reset, // @[Monitor.scala:36:7]
  input         io_in_a_ready, // @[Monitor.scala:20:14]
  input         io_in_a_valid, // @[Monitor.scala:20:14]
  input  [2:0]  io_in_a_bits_opcode, // @[Monitor.scala:20:14]
  input  [2:0]  io_in_a_bits_param, // @[Monitor.scala:20:14]
  input  [3:0]  io_in_a_bits_size, // @[Monitor.scala:20:14]
  input         io_in_a_bits_source, // @[Monitor.scala:20:14]
  input  [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
  input  [3:0]  io_in_a_bits_mask, // @[Monitor.scala:20:14]
  input  [31:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
  input         io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
  input         io_in_d_ready, // @[Monitor.scala:20:14]
  input         io_in_d_valid, // @[Monitor.scala:20:14]
  input  [2:0]  io_in_d_bits_opcode, // @[Monitor.scala:20:14]
  input  [1:0]  io_in_d_bits_param, // @[Monitor.scala:20:14]
  input  [3:0]  io_in_d_bits_size, // @[Monitor.scala:20:14]
  input         io_in_d_bits_source, // @[Monitor.scala:20:14]
  input         io_in_d_bits_sink, // @[Monitor.scala:20:14]
  input         io_in_d_bits_denied, // @[Monitor.scala:20:14]
  input  [31:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
  input         io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);

  wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
  wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
  wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
  wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
  wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
  wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
  wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
  wire io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
  wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
  wire [3:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
  wire [31:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
  wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
  wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
  wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
  wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
  wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
  wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
  wire io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
  wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
  wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
  wire [31:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
  wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
  wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
  wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
  wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
  wire _c_first_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
  wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
  wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
  wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
  wire _c_first_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
  wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
  wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
  wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
  wire _c_first_WIRE_2_bits_source = 1'h0; //
@[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire c_set = 1'h0; // @[Monitor.scala:738:34] wire c_set_wo_ready = 1'h0; // @[Monitor.scala:739:34] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 
1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire 
_same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [9:0] c_first_beats1_decode = 10'h0; // @[Edges.scala:220:59] wire [9:0] c_first_beats1 = 10'h0; // @[Edges.scala:221:14] wire [9:0] _c_first_count_T = 10'h0; // @[Edges.scala:234:27] wire [9:0] c_first_count = 10'h0; // @[Edges.scala:234:25] wire [9:0] _c_first_counter_T = 10'h0; // @[Edges.scala:236:21] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [9:0] c_first_counter1 = 10'h3FF; // @[Edges.scala:230:28] wire [10:0] _c_first_counter1_T = 11'h7FF; // @[Edges.scala:230:28] wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_wo_ready_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_wo_ready_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_interm_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_interm_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_interm_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // 
@[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_4_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_5_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] c_opcodes_set = 4'h0; // @[Monitor.scala:740:34] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_T = 4'h0; // @[Monitor.scala:767:79] wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_T = 4'h0; // @[Monitor.scala:768:77] wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // 
@[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire 
[2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57] wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57] wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [19:0] _c_sizes_set_T_1 = 20'h0; // @[Monitor.scala:768:52] wire [18:0] _c_opcodes_set_T_1 = 19'h0; // @[Monitor.scala:767:54] wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59] wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40] wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [1:0] _c_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _c_set_T = 2'h1; // @[OneHot.scala:58:35] wire [7:0] c_sizes_set = 8'h0; // @[Monitor.scala:741:34] wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // 
@[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117] wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48] wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119] wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire _source_ok_T = ~io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71] wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}] wire mask_sizeOH_shiftAmount = _mask_sizeOH_T[0]; // @[OneHot.scala:64:49] wire [1:0] _mask_sizeOH_T_1 = 2'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [1:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1; // @[OneHot.scala:65:{12,27}] wire [1:0] mask_sizeOH = {_mask_sizeOH_T_2[1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_0_1 = |(io_in_a_bits_size_0[3:1]); // @[Misc.scala:206:21] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_1_2 = mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_size = mask_sizeOH[0]; // 
@[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire _source_ok_T_1 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_1; // @[Parameters.scala:1138:31] wire _T_1081 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1081; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1081; // @[Decoupled.scala:51:35] wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [9:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:2]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [9:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 10'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [9:0] a_first_counter; // @[Edges.scala:229:27] wire [10:0] _a_first_counter1_T = {1'h0, a_first_counter} - 11'h1; // @[Edges.scala:229:27, :230:28] wire [9:0] a_first_counter1 = _a_first_counter1_T[9:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 10'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 10'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 10'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [9:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [9:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [9:0] _a_first_counter_T = a_first ? 
a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [3:0] size; // @[Monitor.scala:389:22] reg source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _T_1154 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1154; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1154; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1154; // @[Decoupled.scala:51:35] wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [9:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:2]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [9:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 10'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [9:0] d_first_counter; // @[Edges.scala:229:27] wire [10:0] _d_first_counter1_T = {1'h0, d_first_counter} - 11'h1; // @[Edges.scala:229:27, :230:28] wire [9:0] d_first_counter1 = _d_first_counter1_T[9:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 10'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 10'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 10'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [9:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [9:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [9:0] _d_first_counter_T = d_first ? 
d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [1:0] inflight; // @[Monitor.scala:614:27] reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [7:0] inflight_sizes; // @[Monitor.scala:618:33] wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [9:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:2]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [9:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 10'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [9:0] a_first_counter_1; // @[Edges.scala:229:27] wire [10:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 11'h1; // @[Edges.scala:229:27, :230:28] wire [9:0] a_first_counter1_1 = _a_first_counter1_T_1[9:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 10'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 10'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 10'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [9:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [9:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [9:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [9:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:2]; // @[package.scala:243:46] wire [9:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 10'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [9:0] d_first_counter_1; // @[Edges.scala:229:27] wire [10:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 11'h1; // @[Edges.scala:229:27, :230:28] wire [9:0] d_first_counter1_1 = _d_first_counter1_T_1[9:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 10'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 10'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 10'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [9:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [9:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [9:0] _d_first_counter_T_1 = d_first_1 ? 
d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire a_set; // @[Monitor.scala:626:34] wire a_set_wo_ready; // @[Monitor.scala:627:34] wire [3:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [7:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [3:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [3:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [3:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [3:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [3:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [3:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [15:0] _a_opcode_lookup_T_6 = {12'h0, _a_opcode_lookup_T_1}; // @[Monitor.scala:637:{44,97}] wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [7:0] a_size_lookup; // @[Monitor.scala:639:33] wire [3:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65] wire [3:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65] wire [3:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99] wire [3:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67] wire [3:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99] wire [7:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [15:0] _a_size_lookup_T_6 = {8'h0, _a_size_lookup_T_1}; // @[Monitor.scala:641:{40,91}] wire [15:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[15:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [1:0] _GEN_3 = {1'h0, io_in_a_bits_source_0}; // @[OneHot.scala:58:35] wire [1:0] _GEN_4 = 2'h1 << _GEN_3; // @[OneHot.scala:58:35] wire [1:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_4; // @[OneHot.scala:58:35] wire [1:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_4; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T & _a_set_wo_ready_T[0]; // @[OneHot.scala:58:35] wire _T_1007 = _T_1081 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1007 & _a_set_T[0]; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1007 ? 
_a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1007 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [3:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [18:0] _a_opcodes_set_T_1 = {15'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1007 ? _a_opcodes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [3:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77] wire [19:0] _a_sizes_set_T_1 = {15'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :660:{52,77}] assign a_sizes_set = _T_1007 ? _a_sizes_set_T_1[7:0] : 8'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire d_clr; // @[Monitor.scala:664:34] wire d_clr_wo_ready; // @[Monitor.scala:665:34] wire [3:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [7:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_5 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_5; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_5; // @[Monitor.scala:673:46, :783:46] wire _T_1053 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [1:0] _GEN_6 = {1'h0, io_in_d_bits_source_0}; // @[OneHot.scala:58:35] wire [1:0] _GEN_7 = 2'h1 << _GEN_6; // @[OneHot.scala:58:35] wire [1:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_7; // @[OneHot.scala:58:35] wire [1:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_7; // @[OneHot.scala:58:35] wire [1:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_7; // @[OneHot.scala:58:35] wire [1:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_7; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1053 & ~d_release_ack & _d_clr_wo_ready_T[0]; // @[OneHot.scala:58:35] wire _T_1022 = _T_1154 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1022 & _d_clr_T[0]; // @[OneHot.scala:58:35] wire [30:0] _d_opcodes_clr_T_5 = 31'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1022 ? _d_opcodes_clr_T_5[3:0] : 4'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [30:0] _d_sizes_clr_T_5 = 31'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1022 ? 
_d_sizes_clr_T_5[7:0] : 8'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [1:0] _inflight_T = {inflight[1], inflight[0] | a_set}; // @[Monitor.scala:614:27, :626:34, :705:27] wire _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [1:0] _inflight_T_2 = {1'h0, _inflight_T[0] & _inflight_T_1}; // @[Monitor.scala:705:{27,36,38}] wire [3:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [3:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [3:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [7:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [7:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [7:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [1:0] inflight_1; // @[Monitor.scala:726:35] wire [1:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [3:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [3:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [7:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [7:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [9:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:2]; // @[package.scala:243:46] wire [9:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 10'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [9:0] d_first_counter_2; // @[Edges.scala:229:27] wire [10:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 11'h1; // @[Edges.scala:229:27, :230:28] wire [9:0] d_first_counter1_2 = _d_first_counter1_T_2[9:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 10'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 10'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 10'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [9:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [9:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [9:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [7:0] c_size_lookup; // @[Monitor.scala:748:35] wire [3:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [15:0] _c_opcode_lookup_T_6 = {12'h0, _c_opcode_lookup_T_1}; // @[Monitor.scala:749:{44,97}] wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [7:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [15:0] _c_size_lookup_T_6 = {8'h0, _c_size_lookup_T_1}; // @[Monitor.scala:750:{42,93}] wire [15:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[15:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire d_clr_1; // @[Monitor.scala:774:34] wire d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [3:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [7:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1125 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1125 & d_release_ack_1 & _d_clr_wo_ready_T_1[0]; // @[OneHot.scala:58:35] wire _T_1107 = _T_1154 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1107 & _d_clr_T_1[0]; // @[OneHot.scala:58:35] wire [30:0] _d_opcodes_clr_T_11 = 31'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1107 ? _d_opcodes_clr_T_11[3:0] : 4'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [30:0] _d_sizes_clr_T_11 = 31'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1107 ? _d_sizes_clr_T_11[7:0] : 8'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7, :795:113] wire _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [1:0] _inflight_T_5 = {1'h0, _inflight_T_3[0] & _inflight_T_4}; // @[Monitor.scala:814:{35,44,46}] wire [3:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [3:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [7:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [7:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
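The wires above lower the monitor's A/D bookkeeping: a one-hot a_set marks a source ID when a request is accepted, d_clr clears it when the matching response completes, per-source opcode/size fields are ORed in and masked out the same way, and a 32-bit watchdog counts cycles between events. The following is a minimal Chisel sketch of that pattern, not part of the generated output; the names (InflightSketch, nSources) are illustrative only.

import chisel3._
import chisel3.util._

class InflightSketch(nSources: Int = 2) extends Module {
  val io = IO(new Bundle {
    val a_fire   = Input(Bool())
    val a_source = Input(UInt(log2Ceil(nSources).W))
    val d_fire   = Input(Bool())
    val d_source = Input(UInt(log2Ceil(nSources).W))
    val inflight = Output(UInt(nSources.W))
  })
  val inflight = RegInit(0.U(nSources.W))
  // One-hot set on an accepted request, one-hot clear on the matching response.
  val a_set = Mux(io.a_fire, UIntToOH(io.a_source, nSources), 0.U)
  val d_clr = Mux(io.d_fire, UIntToOH(io.d_source, nSources), 0.U)
  // Same update the monitor performs each cycle.
  inflight := (inflight | a_set) & ~d_clr
  io.inflight := inflight

  // Watchdog: counts up while transactions may be outstanding, resets on activity.
  val watchdog = RegInit(0.U(32.W))
  watchdog := watchdog + 1.U
  when (io.a_fire || io.d_fire) { watchdog := 0.U }
}

In the emitted Verilog this update appears, after width lowering, as _inflight_T_2 = {1'h0, _inflight_T[0] & _inflight_T_1}, with the opcode and size bookkeeping widened into per-source fields.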
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
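// Each `when` block below gates message-specific legality checks on bundle.opcode.
// The shared subexpressions computed above (source_ok, is_aligned, mask) are reused by
// every case to keep the emitted FIRRTL small; each case then adds its own rules:
// grow permissions and a minimum beat size for AcquireBlock/AcquirePerm, zero param and
// a full mask for Get/PutFull, a subset mask for PutPartial, atomic-op params for
// Arithmetic/Logical, hint params for Hint, and corrupt-bit checks on messages that
// carry no data.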
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
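// inflight holds one bit per source ID; inflight_opcodes / inflight_sizes pack a small
// field per source (the opcode or size, shifted left by one with a 1 in the LSB so that
// an all-zero field means "no request outstanding"). The field for source s lives at
// bit offset s << log_a_opcode_bus_size (resp. s << log_a_size_bus_size), which is what
// the shifted *_set / *_clr masks above select. Illustrative numbers: with
// a_opcode_bus_size = 4, a Get (opcode 4) from source 1 is recorded as (4 << 1) | 1 = 9
// placed at bits [7:4].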
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent holds (or is on an outwards path to) a read-only copy of the block. * (N)one: the agent holds no permissions on the block. * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permissions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask marks the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
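For reference, the edge constructors above all follow the same pattern: request messages (Get, Put, Arithmetic, Logical, Hint, Probe) return a pair of a `legal` bit, computed from the support declared by the visible managers or clients, and the assembled channel bundle, while response messages (Grant, AccessAck, HintAck, ReleaseAck) return just the bundle. A minimal usage sketch, assuming a typical rocket-chip client with a TileLink port `tl` and its `TLEdgeOut` named `edge` (the signal names `addr` and `wantRead` are illustrative only):

  val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
  tl.a.valid := wantRead && legal   // only issue requests the managers actually support
  tl.a.bits  := get
  tl.d.ready := true.B              // the AccessAckData response arrives on channel D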
module TLMonitor_33( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [7:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [7:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire 
_c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] 
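// Note: the monitored edge exposes only the A and D channels (no B/C), so the C-channel
// bookkeeping terms in this monitor (c_first_*, c_set_*, c_probe_ack_*, same_cycle_resp_*)
// are tied off to constants.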
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27] wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // 
@[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] 
_same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_33 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_63 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_65 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_69 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_71 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_77 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_81 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_83 = 1'h1; // @[Parameters.scala:57:20] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28] wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28] wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] 
_c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [7:0] _c_first_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] 
wire [7:0] _c_first_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_first_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_first_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_set_wo_ready_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_set_wo_ready_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_opcodes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_opcodes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_sizes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_sizes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_opcodes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_opcodes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_sizes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_sizes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_probe_ack_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_probe_ack_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_probe_ack_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_probe_ack_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _same_cycle_resp_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _same_cycle_resp_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _same_cycle_resp_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _same_cycle_resp_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _same_cycle_resp_WIRE_4_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _same_cycle_resp_WIRE_5_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 
= 16'h10; // @[Monitor.scala:724:51] wire [2050:0] _c_opcodes_set_T_1 = 2051'h0; // @[Monitor.scala:767:54] wire [2050:0] _c_sizes_set_T_1 = 2051'h0; // @[Monitor.scala:768:52] wire [10:0] _c_opcodes_set_T = 11'h0; // @[Monitor.scala:767:79] wire [10:0] _c_sizes_set_T = 11'h0; // @[Monitor.scala:768:77] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51] wire [255:0] _c_set_wo_ready_T = 256'h1; // @[OneHot.scala:58:35] wire [255:0] _c_set_T = 256'h1; // @[OneHot.scala:58:35] wire [515:0] c_opcodes_set = 516'h0; // @[Monitor.scala:740:34] wire [515:0] c_sizes_set = 516'h0; // @[Monitor.scala:741:34] wire [128:0] c_set = 129'h0; // @[Monitor.scala:738:34] wire [128:0] c_set_wo_ready = 129'h0; // @[Monitor.scala:739:34] wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [7:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] 
_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_48 = 
io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 8'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [2:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[7:3]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[7:3]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [2:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[2:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] _source_ok_T_13 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire [5:0] _source_ok_T_19 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire [5:0] _source_ok_T_25 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire [5:0] _source_ok_T_31 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire _source_ok_T_14 = _source_ok_T_13 == 6'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 6'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_26 = _source_ok_T_25 == 6'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_30 = 
_source_ok_T_28; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_32 = _source_ok_T_31 == 6'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_34 = _source_ok_T_32; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_6 = _source_ok_T_36; // @[Parameters.scala:1138:31] wire _source_ok_T_37 = io_in_a_bits_source_0 == 8'h41; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_37; // @[Parameters.scala:1138:31] wire _source_ok_T_38 = io_in_a_bits_source_0 == 8'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_8 = _source_ok_T_38; // @[Parameters.scala:1138:31] wire _source_ok_T_39 = io_in_a_bits_source_0 == 8'h80; // @[Monitor.scala:36:7] wire _source_ok_WIRE_9 = _source_ok_T_39; // @[Parameters.scala:1138:31] wire _source_ok_T_40 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_41 = _source_ok_T_40 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_47 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [20:0] _is_aligned_T = {15'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = 
mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire 
mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [2:0] uncommonBits = _uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_1 = _uncommonBits_T_1[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_6 = _uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_7 = _uncommonBits_T_7[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_12 = _uncommonBits_T_12[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_13 = _uncommonBits_T_13[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_18 = _uncommonBits_T_18[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_24 = _uncommonBits_T_24[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_25 = _uncommonBits_T_25[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_30 = _uncommonBits_T_30[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_31 = _uncommonBits_T_31[2:0]; // 
@[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_36 = _uncommonBits_T_36[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_37 = _uncommonBits_T_37[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_42 = _uncommonBits_T_42[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_43 = _uncommonBits_T_43[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_48 = _uncommonBits_T_48[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_49 = _uncommonBits_T_49[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_48 = io_in_d_bits_source_0 == 8'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_48; // @[Parameters.scala:1138:31] wire [2:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_49 = io_in_d_bits_source_0[7:3]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_55 = io_in_d_bits_source_0[7:3]; // @[Monitor.scala:36:7] wire _source_ok_T_50 = _source_ok_T_49 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_54; // @[Parameters.scala:1138:31] wire [2:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[2:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_56 = _source_ok_T_55 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_60; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] _source_ok_T_61 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire [5:0] _source_ok_T_67 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire [5:0] _source_ok_T_73 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire [5:0] _source_ok_T_79 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire 
_source_ok_T_62 = _source_ok_T_61 == 6'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_64 = _source_ok_T_62; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_66 = _source_ok_T_64; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_66; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_68 = _source_ok_T_67 == 6'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_70 = _source_ok_T_68; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_72 = _source_ok_T_70; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_72; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_74 = _source_ok_T_73 == 6'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_78 = _source_ok_T_76; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_5 = _source_ok_T_78; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_80 = _source_ok_T_79 == 6'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_82 = _source_ok_T_80; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_84 = _source_ok_T_82; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_6 = _source_ok_T_84; // @[Parameters.scala:1138:31] wire _source_ok_T_85 = io_in_d_bits_source_0 == 8'h41; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_85; // @[Parameters.scala:1138:31] wire _source_ok_T_86 = io_in_d_bits_source_0 == 8'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_8 = _source_ok_T_86; // @[Parameters.scala:1138:31] wire _source_ok_T_87 = io_in_d_bits_source_0 == 8'h80; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_9 = _source_ok_T_87; // @[Parameters.scala:1138:31] wire _source_ok_T_88 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_89 = _source_ok_T_88 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_90 = _source_ok_T_89 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_91 = _source_ok_T_90 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_92 = _source_ok_T_91 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_93 = _source_ok_T_92 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_94 = _source_ok_T_93 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_95 = _source_ok_T_94 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_95 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46] wire _T_1115 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1115; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1115; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire 
_a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [7:0] source; // @[Monitor.scala:390:22] reg [20:0] address; // @[Monitor.scala:391:22] wire _T_1183 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1183; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1183; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1183; // @[Decoupled.scala:51:35] wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [7:0] source_1; // @[Monitor.scala:541:22] reg [128:0] inflight; // @[Monitor.scala:614:27] reg [515:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [515:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [128:0] a_set; // @[Monitor.scala:626:34] wire [128:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [515:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [515:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [10:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [10:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [10:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [10:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [10:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [10:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [10:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [10:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [10:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [515:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [515:0] _a_opcode_lookup_T_6 = {512'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [515:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [515:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [515:0] _a_size_lookup_T_6 = {512'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [515:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[515:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, 
:684:44] wire [255:0] _GEN_2 = 256'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [255:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [255:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35] wire _T_1048 = _T_1115 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1048 ? _a_set_T[128:0] : 129'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1048 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1048 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [10:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [10:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [10:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [2050:0] _a_opcodes_set_T_1 = {2047'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1048 ? _a_opcodes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [2050:0] _a_sizes_set_T_1 = {2047'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1048 ? _a_sizes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [128:0] d_clr; // @[Monitor.scala:664:34] wire [128:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [515:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [515:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_1094 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [255:0] _GEN_5 = 256'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [255:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [255:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [255:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [255:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1094 & ~d_release_ack ? _d_clr_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35] wire _T_1063 = _T_1183 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1063 ? 
_d_clr_T[128:0] : 129'h0; // @[OneHot.scala:58:35] wire [2062:0] _d_opcodes_clr_T_5 = 2063'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1063 ? _d_opcodes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [2062:0] _d_sizes_clr_T_5 = 2063'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1063 ? _d_sizes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [128:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [128:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [128:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [515:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [515:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [515:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [515:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [515:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [515:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [128:0] inflight_1; // @[Monitor.scala:726:35] wire [128:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [515:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [515:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [515:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [515:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? 
d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [515:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [515:0] _c_opcode_lookup_T_6 = {512'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [515:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [515:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [515:0] _c_size_lookup_T_6 = {512'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [515:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[515:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [128:0] d_clr_1; // @[Monitor.scala:774:34] wire [128:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [515:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [515:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1159 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1159 & d_release_ack_1 ? _d_clr_wo_ready_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35] wire _T_1141 = _T_1183 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1141 ? _d_clr_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35] wire [2062:0] _d_opcodes_clr_T_11 = 2063'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1141 ? _d_opcodes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [2062:0] _d_sizes_clr_T_11 = 2063'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1141 ? 
_d_sizes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 8'h0; // @[Monitor.scala:36:7, :795:113] wire [128:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [128:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [515:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [515:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [515:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [515:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files. File MSHR.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import freechips.rocketchip.tilelink._ import TLPermissions._ import TLMessages._ import MetaData._ import chisel3.PrintableHelper import chisel3.experimental.dataview._ class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val a = Valid(new SourceARequest(params)) val b = Valid(new SourceBRequest(params)) val c = Valid(new SourceCRequest(params)) val d = Valid(new SourceDRequest(params)) val e = Valid(new SourceERequest(params)) val x = Valid(new SourceXRequest(params)) val dir = Valid(new DirectoryWrite(params)) val reload = Bool() // get next request via allocate (if any) } class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val way = UInt(params.wayBits.W) val blockB = Bool() val nestB = Bool() val blockC = Bool() val nestC = Bool() } class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val b_toN = Bool() // nested Probes may unhit us val b_toB = Bool() // nested Probes may demote us val b_clr_dirty = Bool() // nested Probes clear dirty val c_set_dirty = Bool() // nested Releases MAY set dirty } sealed trait CacheState { val code = CacheState.index.U CacheState.index = CacheState.index + 1 } object CacheState { var index = 0 } case object S_INVALID extends CacheState case object S_BRANCH extends CacheState case object S_BRANCH_C extends CacheState case object S_TIP extends CacheState case object S_TIP_C extends CacheState case object S_TIP_CD extends CacheState case object S_TIP_D extends CacheState case object S_TRUNK_C extends CacheState case object S_TRUNK_CD extends CacheState class MSHR(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup val status = Valid(new MSHRStatus(params)) val schedule = Decoupled(new ScheduleRequest(params)) val sinkc = Flipped(Valid(new SinkCResponse(params))) val sinkd = Flipped(Valid(new SinkDResponse(params))) val sinke = Flipped(Valid(new SinkEResponse(params))) val nestedwb = Flipped(new NestedWriteback(params)) }) val request_valid = RegInit(false.B) val request = Reg(new FullRequest(params)) val meta_valid = RegInit(false.B) val meta = Reg(new DirectoryResult(params)) // Define which states are valid when (meta_valid) { when (meta.state === INVALID) { assert (!meta.clients.orR) assert (!meta.dirty) } when (meta.state === BRANCH) { assert (!meta.dirty) } when 
(meta.state === TRUNK) { assert (meta.clients.orR) assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one } when (meta.state === TIP) { // noop } } // Completed transitions (s_ = scheduled), (w_ = waiting) val s_rprobe = RegInit(true.B) // B val w_rprobeackfirst = RegInit(true.B) val w_rprobeacklast = RegInit(true.B) val s_release = RegInit(true.B) // CW w_rprobeackfirst val w_releaseack = RegInit(true.B) val s_pprobe = RegInit(true.B) // B val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1] val s_flush = RegInit(true.B) // X w_releaseack val w_grantfirst = RegInit(true.B) val w_grantlast = RegInit(true.B) val w_grant = RegInit(true.B) // first | last depending on wormhole val w_pprobeackfirst = RegInit(true.B) val w_pprobeacklast = RegInit(true.B) val w_pprobeack = RegInit(true.B) // first | last depending on wormhole val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*) val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD val s_execute = RegInit(true.B) // D w_pprobeack, w_grant val w_grantack = RegInit(true.B) val s_writeback = RegInit(true.B) // W w_* // [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall) // However, inB and outC are higher priority than outB, so s_release and s_pprobe // may be safely issued while blockB. Thus we must NOT try to schedule the // potentially stuck s_acquire with either of them (scheduler is all or none). // Meta-data that we discover underway val sink = Reg(UInt(params.outer.bundle.sinkBits.W)) val gotT = Reg(Bool()) val bad_grant = Reg(Bool()) val probes_done = Reg(UInt(params.clientBits.W)) val probes_toN = Reg(UInt(params.clientBits.W)) val probes_noT = Reg(Bool()) // When a nested transaction completes, update our meta data when (meta_valid && meta.state =/= INVALID && io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) { when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B } when (io.nestedwb.c_set_dirty) { meta.dirty := true.B } when (io.nestedwb.b_toB) { meta.state := BRANCH } when (io.nestedwb.b_toN) { meta.hit := false.B } } // Scheduler status io.status.valid := request_valid io.status.bits.set := request.set io.status.bits.tag := request.tag io.status.bits.way := meta.way io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst) io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst // The above rules ensure we will block and not nest an outer probe while still doing our // own inner probes. Thus every probe wakes exactly one MSHR. io.status.bits.blockC := !meta_valid io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst) // The w_grantfirst in nestC is necessary to deal with: // acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock // ... 
this is possible because the release+probe can be for same set, but different tag // We can only demand: block, nest, or queue assert (!io.status.bits.nestB || !io.status.bits.blockB) assert (!io.status.bits.nestC || !io.status.bits.blockC) // Scheduler requests val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe io.schedule.bits.b.valid := !s_rprobe || !s_pprobe io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst) io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant io.schedule.bits.e.valid := !s_grantack && w_grantfirst io.schedule.bits.x.valid := !s_flush && w_releaseack io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait) io.schedule.bits.reload := no_wait io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid || io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid || io.schedule.bits.dir.valid // Schedule completions when (io.schedule.ready) { s_rprobe := true.B when (w_rprobeackfirst) { s_release := true.B } s_pprobe := true.B when (s_release && s_pprobe) { s_acquire := true.B } when (w_releaseack) { s_flush := true.B } when (w_pprobeackfirst) { s_probeack := true.B } when (w_grantfirst) { s_grantack := true.B } when (w_pprobeack && w_grant) { s_execute := true.B } when (no_wait) { s_writeback := true.B } // Await the next operation when (no_wait) { request_valid := false.B meta_valid := false.B } } // Resulting meta-data val final_meta_writeback = WireInit(meta) val req_clientBit = params.clientBit(request.source) val req_needT = needT(request.opcode, request.param) val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm val meta_no_clients = !meta.clients.orR val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT) when (request.prio(2) && (!params.firstLevel).B) { // always a hit final_meta_writeback.dirty := meta.dirty || request.opcode(0) final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state) final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U) final_meta_writeback.hit := true.B // chained requests are hits } .elsewhen (request.control && params.control.B) { // request.prio(0) when (meta.hit) { final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := meta.clients & ~probes_toN } final_meta_writeback.hit := false.B } .otherwise { final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2) final_meta_writeback.state := Mux(req_needT, Mux(req_acquire, TRUNK, TIP), Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH), MuxLookup(meta.state, 0.U(2.W))(Seq( INVALID -> BRANCH, BRANCH -> BRANCH, TRUNK -> TIP, TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP))))) final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) | Mux(req_acquire, req_clientBit, 0.U) final_meta_writeback.tag := request.tag final_meta_writeback.hit := true.B } when (bad_grant) { when (meta.hit) { // upgrade failed (B -> T) assert (!meta_valid || meta.state === BRANCH) final_meta_writeback.hit := true.B final_meta_writeback.dirty := false.B final_meta_writeback.state := BRANCH final_meta_writeback.clients := meta.clients & ~probes_toN } .otherwise { // failed N -> (T or B) 
final_meta_writeback.hit := false.B final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := 0.U } } val invalid = Wire(new DirectoryEntry(params)) invalid.dirty := false.B invalid.state := INVALID invalid.clients := 0.U invalid.tag := 0.U // Just because a client says BtoT, by the time we process the request he may be N. // Therefore, we must consult our own meta-data state to confirm he owns the line still. val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR // The client asking us to act is proof they don't have permissions. val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U) io.schedule.bits.a.bits.tag := request.tag io.schedule.bits.a.bits.set := request.set io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB) io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U || !(request.opcode === PutFullData || request.opcode === AcquirePerm) io.schedule.bits.a.bits.source := 0.U io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB))) io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag) io.schedule.bits.b.bits.set := request.set io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release) io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN) io.schedule.bits.c.bits.source := 0.U io.schedule.bits.c.bits.tag := meta.tag io.schedule.bits.c.bits.set := request.set io.schedule.bits.c.bits.way := meta.way io.schedule.bits.c.bits.dirty := meta.dirty io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param, MuxLookup(request.param, request.param)(Seq( NtoB -> Mux(req_promoteT, NtoT, NtoB), BtoT -> Mux(honour_BtoT, BtoT, NtoT), NtoT -> NtoT))) io.schedule.bits.d.bits.sink := 0.U io.schedule.bits.d.bits.way := meta.way io.schedule.bits.d.bits.bad := bad_grant io.schedule.bits.e.bits.sink := sink io.schedule.bits.x.bits.fail := false.B io.schedule.bits.dir.bits.set := request.set io.schedule.bits.dir.bits.way := meta.way io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback)) // Coverage of state transitions def cacheState(entry: DirectoryEntry, hit: Bool) = { val out = WireDefault(0.U) val c = entry.clients.orR val d = entry.dirty switch (entry.state) { is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) } is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) } is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) } is (INVALID) { out := S_INVALID.code } } when (!hit) { out := S_INVALID.code } out } val p = !params.lastLevel // can be probed val c = !params.firstLevel // can be acquired val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read) val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist val f = params.control // flush control register exists val cfg = (p, c, m, r, f) val b = r || p // can reach branch state (via probe downgrade or read-only device) // The cache must be used for something or we would not be here require(c || m) val evict = cacheState(meta, !meta.hit) val before = cacheState(meta, meta.hit) val after = cacheState(final_meta_writeback, 
true.B) def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}") } else { assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}") } if (cover && f) { params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}") } else { assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}") } } def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}") } else { assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}") } } when ((!s_release && w_rprobeackfirst) && io.schedule.ready) { eviction(S_BRANCH, b) // MMIO read to read-only device eviction(S_BRANCH_C, b && c) // you need children to become C eviction(S_TIP, true) // MMIO read || clean release can lead to this state eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_D, true) // MMIO write || dirty release lead here eviction(S_TRUNK_C, c) // acquire for write eviction(S_TRUNK_CD, c) // dirty release then reacquire } when ((!s_writeback && no_wait) && io.schedule.ready) { transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches transition(S_INVALID, S_TIP, m) // MMIO read transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_INVALID, S_TIP_D, m) // MMIO write transition(S_INVALID, S_TRUNK_C, c) // acquire transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions) transition(S_BRANCH, S_BRANCH_C, b && c) // acquire transition(S_BRANCH, S_TIP, b && m) // prefetch write transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_TIP_D, b && m) // MMIO write transition(S_BRANCH, S_TRUNK_C, b && c) // acquire transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH_C, S_INVALID, b && c && p) transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional) transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_TIP, S_INVALID, p) transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write transition(S_TIP, 
S_TIP_CD, false) // acquire does not make us dirty immediately transition(S_TIP, S_TRUNK_C, c) // acquire transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately transition(S_TIP_C, S_INVALID, c && p) transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional) transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_TIP_C, S_TRUNK_C, c) // acquire transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty transition(S_TIP_D, S_INVALID, p) transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired transition(S_TIP_D, S_TRUNK_CD, c) // acquire transition(S_TIP_CD, S_INVALID, c && p) transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional) transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire transition(S_TIP_CD, S_TRUNK_CD, c) // acquire transition(S_TRUNK_C, S_INVALID, c && p) transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional) transition(S_TRUNK_C, S_TIP_C, c) // bounce shared transition(S_TRUNK_C, S_TIP_D, c) // dirty release transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce transition(S_TRUNK_CD, S_INVALID, c && p) transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TRUNK_CD, S_TIP_D, c) // dirty release transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire } // Handle response messages val probe_bit = params.clientBit(io.sinkc.bits.source) val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client) val probe_toN = isToN(io.sinkc.bits.param) if (!params.firstLevel) when (io.sinkc.valid) { params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B") params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B") // Caution: the probe matches us only in set. // We would never allow an outer probe to nest until both w_[rp]probeack complete, so // it is safe to just unguardedly update the probe FSM. 
probes_done := probes_done | probe_bit probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U) probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT w_rprobeackfirst := w_rprobeackfirst || last_probe w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last) w_pprobeackfirst := w_pprobeackfirst || last_probe w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last) // Allow wormhole routing from sinkC if the first request beat has offset 0 val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U) w_pprobeack := w_pprobeack || set_pprobeack params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data") params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data") // However, meta-data updates need to be done more cautiously when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!! } when (io.sinkd.valid) { when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) { sink := io.sinkd.bits.sink w_grantfirst := true.B w_grantlast := io.sinkd.bits.last // Record if we need to prevent taking ownership bad_grant := io.sinkd.bits.denied // Allow wormhole routing for requests whose first beat has offset 0 w_grant := request.offset === 0.U || io.sinkd.bits.last params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data") params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data") gotT := io.sinkd.bits.param === toT } .elsewhen (io.sinkd.bits.opcode === ReleaseAck) { w_releaseack := true.B } } when (io.sinke.valid) { w_grantack := true.B } // Bootstrap new requests val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits) val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits) val new_request = Mux(io.allocate.valid, allocate_as_full, request) val new_needT = needT(new_request.opcode, new_request.param) val new_clientBit = params.clientBit(new_request.source) val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U) val prior = cacheState(final_meta_writeback, true.B) def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}") } else { assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}") } } when (io.allocate.valid && io.allocate.bits.repeat) { bypass(S_INVALID, f || p) // Can lose permissions (probe/flush) bypass(S_BRANCH, b) // MMIO read to read-only device bypass(S_BRANCH_C, b && c) // you need children to become C bypass(S_TIP, true) // MMIO read || clean release can lead to this state bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_D, true) // MMIO write || dirty release lead here bypass(S_TRUNK_C, c) // acquire for write bypass(S_TRUNK_CD, c) // dirty release then reacquire } when (io.allocate.valid) { assert (!request_valid || (no_wait && io.schedule.fire)) request_valid := true.B request := io.allocate.bits } // Create execution plan when (io.directory.valid || (io.allocate.valid && 
io.allocate.bits.repeat)) { meta_valid := true.B meta := new_meta probes_done := 0.U probes_toN := 0.U probes_noT := false.B gotT := false.B bad_grant := false.B // These should already be either true or turning true // We clear them here explicitly to simplify the mux tree s_rprobe := true.B w_rprobeackfirst := true.B w_rprobeacklast := true.B s_release := true.B w_releaseack := true.B s_pprobe := true.B s_acquire := true.B s_flush := true.B w_grantfirst := true.B w_grantlast := true.B w_grant := true.B w_pprobeackfirst := true.B w_pprobeacklast := true.B w_pprobeack := true.B s_probeack := true.B s_grantack := true.B s_execute := true.B w_grantack := true.B s_writeback := true.B // For C channel requests (ie: Release[Data]) when (new_request.prio(2) && (!params.firstLevel).B) { s_execute := false.B // Do we need to go dirty? when (new_request.opcode(0) && !new_meta.dirty) { s_writeback := false.B } // Does our state change? when (isToB(new_request.param) && new_meta.state === TRUNK) { s_writeback := false.B } // Do our clients change? when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) { s_writeback := false.B } assert (new_meta.hit) } // For X channel requests (ie: flush) .elsewhen (new_request.control && params.control.B) { // new_request.prio(0) s_flush := false.B // Do we need to actually do something? when (new_meta.hit) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } } // For A channel requests .otherwise { // new_request.prio(0) && !new_request.control s_execute := false.B // Do we need an eviction? when (!new_meta.hit && new_meta.state =/= INVALID) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } // Do we need an acquire? when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) { s_acquire := false.B w_grantfirst := false.B w_grantlast := false.B w_grant := false.B s_grantack := false.B s_writeback := false.B } // Do we need a probe? when ((!params.firstLevel).B && (new_meta.hit && (new_needT || new_meta.state === TRUNK) && (new_meta.clients & ~new_skipProbe) =/= 0.U)) { s_pprobe := false.B w_pprobeackfirst := false.B w_pprobeacklast := false.B w_pprobeack := false.B s_writeback := false.B } // Do we need a grantack? when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) { w_grantack := false.B s_writeback := false.B } // Becomes dirty? when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) { s_writeback := false.B } } } } File Parameters.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property.cover import scala.math.{min,max} case class CacheParameters( level: Int, ways: Int, sets: Int, blockBytes: Int, beatBytes: Int, // inner hintsSkipProbe: Boolean) { require (ways > 0) require (sets > 0) require (blockBytes > 0 && isPow2(blockBytes)) require (beatBytes > 0 && isPow2(beatBytes)) require (blockBytes >= beatBytes) val blocks = ways * sets val sizeBytes = blocks * blockBytes val blockBeats = blockBytes/beatBytes } case class InclusiveCachePortParameters( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams) { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e)) } object InclusiveCachePortParameters { val none = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.none) val full = InclusiveCachePortParameters( a = BufferParams.default, b = BufferParams.default, c = BufferParams.default, d = BufferParams.default, e = BufferParams.default) // This removes feed-through paths from C=>A and A=>C val fullC = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.default, d = BufferParams.none, e = BufferParams.none) val flowAD = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.flow, e = BufferParams.none) val flowAE = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.flow) // For innerBuf: // SinkA: no restrictions, flows into scheduler+putbuffer // SourceB: no restrictions, flows out of scheduler // sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore // SourceD: no restrictions, flows out of bankedStore/regout // SinkE: no restrictions, flows into scheduler // // ... so while none is possible, you probably want at least flowAC to cut ready // from the scheduler delay and flowD to ease SourceD back-pressure // For outerBufer: // SourceA: must not be pipe, flows out of scheduler // SinkB: no restrictions, flows into scheduler // SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored // SinkD: no restrictions, flows into scheduler & bankedStore // SourceE: must not be pipe, flows out of scheduler // // ... 
AE take the channel ready into the scheduler, so you need at least flowAE } case class InclusiveCacheMicroParameters( writeBytes: Int, // backing store update granularity memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz) portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes dirReg: Boolean = false, innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE { require (writeBytes > 0 && isPow2(writeBytes)) require (memCycles > 0) require (portFactor >= 2) // for inner RMW and concurrent outer Relase + Grant } case class InclusiveCacheControlParameters( address: BigInt, beatBytes: Int, bankedControl: Boolean) case class InclusiveCacheParameters( cache: CacheParameters, micro: InclusiveCacheMicroParameters, control: Boolean, inner: TLEdgeIn, outer: TLEdgeOut)(implicit val p: Parameters) { require (cache.ways > 1) require (cache.sets > 1 && isPow2(cache.sets)) require (micro.writeBytes <= inner.manager.beatBytes) require (micro.writeBytes <= outer.manager.beatBytes) require (inner.manager.beatBytes <= cache.blockBytes) require (outer.manager.beatBytes <= cache.blockBytes) // Require that all cached address ranges have contiguous blocks outer.manager.managers.flatMap(_.address).foreach { a => require (a.alignment >= cache.blockBytes) } // If we are the first level cache, we do not need to support inner-BCE val firstLevel = !inner.client.clients.exists(_.supports.probe) // If we are the last level cache, we do not need to support outer-B val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED) require (lastLevel) // Provision enough resources to achieve full throughput with missing single-beat accesses val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro) val secondary = max(mshrs, micro.memCycles - mshrs) val putLists = micro.memCycles // allow every request to be single beat val putBeats = max(2*cache.blockBeats, micro.memCycles) val relLists = 2 val relBeats = relLists*cache.blockBeats val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address)) val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_)) def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] = if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail) val addressMapping = bitOffsets(pickMask) val addressBits = addressMapping.size // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}") val allClients = inner.client.clients.size val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size val clientBits = max(1, clientBitsRaw) val stateBits = 2 val wayBits = log2Ceil(cache.ways) val setBits = log2Ceil(cache.sets) val offsetBits = log2Ceil(cache.blockBytes) val tagBits = addressBits - setBits - offsetBits val putBits = log2Ceil(max(putLists, relLists)) require (tagBits > 0) require (offsetBits > 0) val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1 val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1 val innerMaskBits = inner.manager.beatBytes / micro.writeBytes val outerMaskBits = outer.manager.beatBytes / micro.writeBytes def clientBit(source: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse) } } def 
clientSource(bit: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U)) } } def parseAddress(x: UInt): (UInt, UInt, UInt) = { val offset = Cat(addressMapping.map(o => x(o,o)).reverse) val set = offset >> offsetBits val tag = set >> setBits (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0)) } def widen(x: UInt, width: Int): UInt = { val y = x | 0.U(width.W) assert (y >> width === 0.U) y(width-1, 0) } def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = { val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits)) val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) } addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) } Cat(bits.reverse) } def restoreAddress(expanded: UInt): UInt = { val missingBits = flatAddresses .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match .groupBy(_._1) .view .mapValues(_.map(_._2)) val muxMask = AddressDecoder(missingBits.values.toList) val mux = missingBits.toList.map { case (bits, addrs) => val widen = addrs.map(_.widen(~muxMask)) val matches = AddressSet .unify(widen.distinct) .map(_.contains(expanded)) .reduce(_ || _) (matches, bits.U) } expanded | Mux1H(mux) } def dirReg[T <: Data](x: T, en: Bool = true.B): T = { if (micro.dirReg) RegEnable(x, en) else x } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc) } object MetaData { val stateBits = 2 def INVALID: UInt = 0.U(stateBits.W) // way is empty def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch // Does a request need trunk? def needT(opcode: UInt, param: UInt): Bool = { !opcode(2) || (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) || ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB) } // Does a request prove the client need not be probed? 
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = { // Acquire(toB) and Get => is N, so no probe // Acquire(*toT) => is N or B, but need T, so no probe // Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client // Put* => is N or B, so probe IS needed opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B) } def isToN(param: UInt): Bool = { param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN } def isToB(param: UInt): Bool = { param === TLPermissions.TtoB || param === TLPermissions.BtoB } } object InclusiveCacheParameters { val lfsrBits = 10 val L2ControlAddress = 0x2010000 val L2ControlSize = 0x1000 def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = { // We need 2-3 normal MSHRs to cover the Directory latency // To fully exploit memory bandwidth-delay-product, we need memCyles/blockBeats MSHRs max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats) } def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = // We need a dedicated MSHR for B+C each 2 + out_mshrs(cache, micro) } class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
module MSHR_6( // @[MSHR.scala:84:7] input clock, // @[MSHR.scala:84:7] input reset, // @[MSHR.scala:84:7] input io_allocate_valid, // @[MSHR.scala:86:14] input io_allocate_bits_prio_2, // @[MSHR.scala:86:14] input io_allocate_bits_control, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14] input [12:0] io_allocate_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14] input [9:0] io_allocate_bits_set, // @[MSHR.scala:86:14] input io_allocate_bits_repeat, // @[MSHR.scala:86:14] input io_directory_valid, // @[MSHR.scala:86:14] input io_directory_bits_dirty, // @[MSHR.scala:86:14] input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14] input io_directory_bits_clients, // @[MSHR.scala:86:14] input [12:0] io_directory_bits_tag, // @[MSHR.scala:86:14] input io_directory_bits_hit, // @[MSHR.scala:86:14] input [2:0] io_directory_bits_way, // @[MSHR.scala:86:14] output io_status_valid, // @[MSHR.scala:86:14] output [9:0] io_status_bits_set, // @[MSHR.scala:86:14] output [12:0] io_status_bits_tag, // @[MSHR.scala:86:14] output [2:0] io_status_bits_way, // @[MSHR.scala:86:14] output io_status_bits_blockB, // @[MSHR.scala:86:14] output io_status_bits_nestB, // @[MSHR.scala:86:14] output io_status_bits_blockC, // @[MSHR.scala:86:14] output io_status_bits_nestC, // @[MSHR.scala:86:14] input io_schedule_ready, // @[MSHR.scala:86:14] output io_schedule_valid, // @[MSHR.scala:86:14] output io_schedule_bits_a_valid, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14] output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14] output io_schedule_bits_b_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14] output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14] output io_schedule_bits_c_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14] output io_schedule_bits_d_valid, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_way, // 
@[MSHR.scala:86:14] output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14] output io_schedule_bits_e_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14] output io_schedule_bits_x_valid, // @[MSHR.scala:86:14] output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14] output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14] output io_schedule_bits_reload, // @[MSHR.scala:86:14] input io_sinkc_valid, // @[MSHR.scala:86:14] input io_sinkc_bits_last, // @[MSHR.scala:86:14] input [9:0] io_sinkc_bits_set, // @[MSHR.scala:86:14] input [12:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14] input io_sinkc_bits_data, // @[MSHR.scala:86:14] input io_sinkd_valid, // @[MSHR.scala:86:14] input io_sinkd_bits_last, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14] input io_sinkd_bits_denied, // @[MSHR.scala:86:14] input io_sinke_valid, // @[MSHR.scala:86:14] input [2:0] io_sinke_bits_sink, // @[MSHR.scala:86:14] input [9:0] io_nestedwb_set, // @[MSHR.scala:86:14] input [12:0] io_nestedwb_tag, // @[MSHR.scala:86:14] input io_nestedwb_b_toN, // @[MSHR.scala:86:14] input io_nestedwb_b_toB, // @[MSHR.scala:86:14] input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14] input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14] ); wire [12:0] final_meta_writeback_tag; // @[MSHR.scala:215:38] wire final_meta_writeback_clients; // @[MSHR.scala:215:38] wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38] wire final_meta_writeback_dirty; // @[MSHR.scala:215:38] wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7] wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7] wire [12:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7] wire [9:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7] wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7] wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7] wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7] wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7] wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7] wire [12:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7] 
wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7] wire [2:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7] wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7] wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7] wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7] wire [9:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7] wire [12:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7] wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7] wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7] wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7] wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7] wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7] wire [2:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7] wire [9:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7] wire [12:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7] wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7] wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7] wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7] wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_1 = 1'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_1 = 1'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7] wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68] wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80] wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21] wire invalid_clients = 1'h0; // @[MSHR.scala:268:21] wire _excluded_client_T = 1'h0; // @[MSHR.scala:279:38] wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137] wire _excluded_client_T_9 = 1'h0; // @[MSHR.scala:279:57] wire excluded_client = 1'h0; // @[MSHR.scala:279:28] wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11] wire allocate_as_full_prio_0 = 1'h0; // @[MSHR.scala:504:34] wire allocate_as_full_prio_1 = 1'h0; // @[MSHR.scala:504:34] wire new_request_prio_0 = 1'h0; // @[MSHR.scala:506:24] wire new_request_prio_1 = 1'h0; // @[MSHR.scala:506:24] wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137] wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11] wire _io_schedule_bits_b_bits_clients_T = 1'h1; // @[MSHR.scala:289:53] wire _last_probe_T_1 = 1'h1; // @[MSHR.scala:459:66] wire [2:0] io_schedule_bits_a_bits_source = 3'h0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_source = 3'h0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_sink = 3'h0; // @[MSHR.scala:84:7] wire [12:0] invalid_tag = 13'h0; // @[MSHR.scala:268:21] wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21] wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70] wire 
allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34] wire [12:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34] wire [9:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34] wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40] wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93] wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28] wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39] wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105] wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55] wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91] wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41] wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41] wire [12:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41] wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51] wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64] wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41] wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41] wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57] wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41] wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43] wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40] wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66] wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41] wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41] wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41] wire [12:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41] wire no_wait; // @[MSHR.scala:183:83] wire [9:0] io_status_bits_set_0; // @[MSHR.scala:84:7] wire [12:0] io_status_bits_tag_0; // @[MSHR.scala:84:7] wire [2:0] io_status_bits_way_0; // @[MSHR.scala:84:7] wire io_status_bits_blockB_0; // @[MSHR.scala:84:7] wire io_status_bits_nestB_0; // @[MSHR.scala:84:7] wire io_status_bits_blockC_0; // @[MSHR.scala:84:7] wire io_status_bits_nestC_0; // @[MSHR.scala:84:7] wire io_status_valid_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] 
io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7] wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7] wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7] wire io_schedule_valid_0; // @[MSHR.scala:84:7] reg request_valid; // @[MSHR.scala:97:30] assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30] reg request_prio_2; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20] reg request_control; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_opcode; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_param; // @[MSHR.scala:98:20] reg [2:0] request_size; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_source; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20] reg [12:0] request_tag; // @[MSHR.scala:98:20] assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_offset; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_put; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20] reg [9:0] request_set; // @[MSHR.scala:98:20] assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_b_bits_set_0 
= request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] reg meta_valid; // @[MSHR.scala:99:27] reg meta_dirty; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17] reg [1:0] meta_state; // @[MSHR.scala:100:17] reg meta_clients; // @[MSHR.scala:100:17] wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39] assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients; // @[MSHR.scala:100:17, :289:51] wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27] wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27] wire _last_probe_T_2 = meta_clients; // @[MSHR.scala:100:17, :459:64] reg [12:0] meta_tag; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17] reg meta_hit; // @[MSHR.scala:100:17] reg [2:0] meta_way; // @[MSHR.scala:100:17] assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] wire [2:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38] reg s_rprobe; // @[MSHR.scala:121:33] reg w_rprobeackfirst; // @[MSHR.scala:122:33] reg w_rprobeacklast; // @[MSHR.scala:123:33] reg s_release; // @[MSHR.scala:124:33] reg w_releaseack; // @[MSHR.scala:125:33] reg s_pprobe; // @[MSHR.scala:126:33] reg s_acquire; // @[MSHR.scala:127:33] reg s_flush; // @[MSHR.scala:128:33] reg w_grantfirst; // @[MSHR.scala:129:33] reg w_grantlast; // @[MSHR.scala:130:33] reg w_grant; // @[MSHR.scala:131:33] reg w_pprobeackfirst; // @[MSHR.scala:132:33] reg w_pprobeacklast; // @[MSHR.scala:133:33] reg w_pprobeack; // @[MSHR.scala:134:33] reg s_grantack; // @[MSHR.scala:136:33] reg s_execute; // @[MSHR.scala:137:33] reg w_grantack; // @[MSHR.scala:138:33] reg s_writeback; // @[MSHR.scala:139:33] reg [2:0] sink; // @[MSHR.scala:147:17] assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17] reg gotT; // @[MSHR.scala:148:17] reg bad_grant; // @[MSHR.scala:149:22] assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22] reg probes_done; // @[MSHR.scala:150:24] reg probes_toN; // @[MSHR.scala:151:23] reg probes_noT; // @[MSHR.scala:152:23] wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28] wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45] wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62] wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}] wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82] wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}] wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103] wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}] assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}] assign 
io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40] wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39] wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}] wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}] wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96] assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}] assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93] assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28] assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28] wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43] wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64] wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}] wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85] wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}] assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}] assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39] wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33] wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}] wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}] assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}] assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83] wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31] wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}] assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}] assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55] wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31] wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44] assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}] assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41] wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32] wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}] assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}] assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64] wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31] wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}] assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}] assign 
io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57] wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31] assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}] assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43] wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31] assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}] assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40] wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34] wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}] wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70] wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}] assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}] assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66] wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49] wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}] wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}] wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49] wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}] assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}] assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105] wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71] wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71] wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71] wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire [12:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71] wire final_meta_writeback_hit; // @[MSHR.scala:215:38] wire req_clientBit = request_source == 6'h21; // @[Parameters.scala:46:9] wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12] wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12] wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _req_needT_T_2; // @[Parameters.scala:270:13] assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13] wire _excluded_client_T_6; // @[Parameters.scala:279:117] assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117] wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42] wire _req_needT_T_3; // 
@[Parameters.scala:270:42] assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42] wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11] assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11] wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42] wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _req_needT_T_6; // @[Parameters.scala:271:14] assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14] wire _req_acquire_T; // @[MSHR.scala:219:36] assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14] wire _excluded_client_T_1; // @[Parameters.scala:279:12] assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12] wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52] wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89] wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52] wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}] wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}] wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81] wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}] wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}] wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}] wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65] wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}] wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55] wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78] wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78] assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78] wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70] wire _evict_T_2; // @[MSHR.scala:317:26] assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _before_T_1; // @[MSHR.scala:317:26] assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}] wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 
2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}] wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43] assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43] wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}] wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75] wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}] wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}] wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}] wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54] wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}] wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45] wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}] wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}] wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40] wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40] assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40] wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65] assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65] wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41] wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}] wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72] wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}] wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70] wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70] wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53] assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53] wire _evict_T_1; // @[MSHR.scala:317:26] assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire _before_T; // @[MSHR.scala:317:26] assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70] wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70] wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55] wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? 
_final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70] wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70] wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66] wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}] wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}] wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40] assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30] wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54] wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}] assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21] assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21] assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36] assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36] wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9] wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}] wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}] wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50] wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}] wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}] wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}] wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56] wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? 
_io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70] assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}] wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51] wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55] wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52] wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}] wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}] assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38] assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91] wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42] wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70] wire [2:0] _io_schedule_bits_b_bits_param_T_2 = {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:286:{61,97}] assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}] assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41] wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42] assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}] assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41] assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51] assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41] assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41] assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}] assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41] wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42] wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53] wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53] wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89] wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53] wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53] wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? 
request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79] assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41] wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42] assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 13'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}] assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41] wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32] wire [3:0] evict; // @[MSHR.scala:314:26] wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32] wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32] wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32] assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32] assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39] wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39] assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39] assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76] wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76] assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76] assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32] assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] before_0; // @[MSHR.scala:314:26] wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32] wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _before_out_T_4 = before_c ? 
_before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11] assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] after; // @[MSHR.scala:314:26] wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26] wire _after_T; // @[MSHR.scala:317:26] assign _after_T = _GEN_9; // @[MSHR.scala:317:26] wire _prior_T; // @[MSHR.scala:317:26] assign _prior_T = _GEN_9; // @[MSHR.scala:317:26] wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32] wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26] wire _after_T_1; // @[MSHR.scala:317:26] assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire _prior_T_1; // @[MSHR.scala:317:26] assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32] wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32] assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32] assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39] wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39] assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39] assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76] wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76] assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76] assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26] wire _after_T_3; // @[MSHR.scala:317:26] assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26] wire _prior_T_3; // @[MSHR.scala:317:26] assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26] assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? 
{1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire probe_bit = io_sinkc_bits_source_0 == 6'h21; // @[Parameters.scala:46:9] wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9] wire _last_probe_T; // @[MSHR.scala:459:33] assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33] wire _probes_done_T; // @[MSHR.scala:467:32] assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32] wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}] wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11] wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43] wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}] wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75] wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}] wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9] wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}] wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53] wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}] wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42] wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55] wire _w_rprobeacklast_T; // @[MSHR.scala:471:55] assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55] wire _w_pprobeacklast_T; // @[MSHR.scala:473:55] assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55] wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}] wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42] wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}] wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77] wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}] wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}] wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32] wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33] wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}] wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35] wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40] wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [12:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [2:0] new_meta_way = _new_meta_T ? 
final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [12:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [9:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12] wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _new_needT_T_2; // @[Parameters.scala:270:13] assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13] wire _new_skipProbe_T_5; // @[Parameters.scala:279:117] assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117] wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42] wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _new_needT_T_6; // @[Parameters.scala:271:14] assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14] wire _new_skipProbe_T; // @[Parameters.scala:279:12] assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12] wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52] wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89] wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire new_clientBit = new_request_source == 6'h21; // @[Parameters.scala:46:9] wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50] wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}] wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}] wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}] wire new_skipProbe = _new_skipProbe_T_7 & 
new_clientBit; // @[Parameters.scala:46:9] wire [3:0] prior; // @[MSHR.scala:314:26] wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32] wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.util

import chisel3._

// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
  def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
    (0 until n).foldRight(in) {
      case (i, next) => {
        val r = RegNext(next, init)
        name.foreach { na => r.suggestName(s"${na}_${i}") }
        r
      }
    }
}

/** These wrap behavioral
 * shift registers into specific modules to allow for
 * backend flows to replace or constrain
 * them properly when used for CDC synchronization,
 * rather than buffering.
 *
 * The different types vary in their reset behavior:
 * AsyncResetShiftReg -- Asynchronously reset register array
 * A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
 * but only used for timing applications
 */

abstract class AbstractPipelineReg(w: Int = 1) extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(w.W))
    val q = Output(UInt(w.W))
  })
}

object AbstractPipelineReg {
  def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
    val chain = Module(gen)
    name.foreach{ chain.suggestName(_) }
    chain.io.d := in.asUInt
    chain.io.q.asTypeOf(in)
  }
}

class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
  require(depth > 0, "Depth must be greater than 0.")

  override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"

  val chain = List.tabulate(depth) { i =>
    Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
  }

  chain.last.io.d := io.d
  chain.last.io.en := true.B

  (chain.init zip chain.tail).foreach { case (sink, source) =>
    sink.io.d := source.io.q
    sink.io.en := true.B
  }
  io.q := chain.head.io.q
}

object AsyncResetShiftReg {
  def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
    AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)

  def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
    apply(in, depth, 0, name)

  def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
    apply(in, depth, init.litValue.toInt, name)

  def apply [T <: Data](in: T, depth: Int, init: T): T =
    apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.util

import chisel3._
import chisel3.util.{RegEnable, Cat}

/** These wrap behavioral
 * shift and next registers into specific modules to allow for
 * backend flows to replace or constrain
 * them properly when used for CDC synchronization,
 * rather than buffering.
 *
 *
 * These are built up of *ResetSynchronizerPrimitiveShiftReg,
 * intended to be replaced by the integrator's metastable flops chains or replaced
 * at this level if they have a multi-bit wide synchronizer primitive.
 * The different types vary in their reset behavior:
 * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
 * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
 * 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
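A brief orientation note before the generated module below: the helpers defined above are normally consumed through their companion-object apply methods rather than instantiated directly. The following is a minimal usage sketch, not part of the files above; the module and signal names (CrossingSink, valid_src, data_src) are hypothetical.

import chisel3._
import freechips.rocketchip.util.{AsyncResetSynchronizerShiftReg, ClockCrossingReg}

// Hypothetical receive-side module: a 3-deep async-reset synchronizer for a
// 1-bit control signal, plus a single clock-crossing register for an 8-bit
// payload that is only sampled once the control bit has been synchronized.
class CrossingSink extends Module {
  val io = IO(new Bundle {
    val valid_src = Input(Bool())     // driven from another clock domain
    val data_src  = Input(UInt(8.W))  // quasi-static payload from that domain
    val valid     = Output(Bool())
    val data      = Output(UInt(8.W))
  })
  // Elaborates an AsyncResetSynchronizerShiftReg_w1_d3_i0 wrapper like the one below
  io.valid := AsyncResetSynchronizerShiftReg(io.valid_src, sync = 3, init = 0)
  // Elaborates a ClockCrossingReg_w8, enabled by the synchronized control bit
  io.data := ClockCrossingReg(io.data_src, en = io.valid, doInit = false)
}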
module AsyncResetSynchronizerShiftReg_w1_d3_i0_100( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_164 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
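For reference, the wrapper just shown is structurally three async-reset flops in series per input bit: io_d feeds the tail of the chain inside output_chain and io_q is driven by its head. A hand-written sketch of the single-bit case (illustrative only, not the emitted code; the class name is hypothetical) would be:

import chisel3._

// Sketch of what AsyncResetSynchronizerShiftReg with w = 1, sync = 3, init = 0
// expands to: d feeds the tail of a 3-stage chain, q is driven by the head,
// and every stage resets asynchronously to the init value (0 here).
class OneBitAsyncSyncSketch extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(1.W))
    val q = Output(UInt(1.W))
  })
  io.q := withReset(reset.asAsyncReset) {
    val sync_2 = RegNext(io.d(0), false.B) // tail: samples the raw input
    val sync_1 = RegNext(sync_2, false.B)
    val sync_0 = RegNext(sync_1, false.B)  // head: drives the output
    sync_0
  }
}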
Generate the Verilog code corresponding to the following Chisel files. File MSHR.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import freechips.rocketchip.tilelink._ import TLPermissions._ import TLMessages._ import MetaData._ import chisel3.PrintableHelper import chisel3.experimental.dataview._ class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val a = Valid(new SourceARequest(params)) val b = Valid(new SourceBRequest(params)) val c = Valid(new SourceCRequest(params)) val d = Valid(new SourceDRequest(params)) val e = Valid(new SourceERequest(params)) val x = Valid(new SourceXRequest(params)) val dir = Valid(new DirectoryWrite(params)) val reload = Bool() // get next request via allocate (if any) } class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val way = UInt(params.wayBits.W) val blockB = Bool() val nestB = Bool() val blockC = Bool() val nestC = Bool() } class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val b_toN = Bool() // nested Probes may unhit us val b_toB = Bool() // nested Probes may demote us val b_clr_dirty = Bool() // nested Probes clear dirty val c_set_dirty = Bool() // nested Releases MAY set dirty } sealed trait CacheState { val code = CacheState.index.U CacheState.index = CacheState.index + 1 } object CacheState { var index = 0 } case object S_INVALID extends CacheState case object S_BRANCH extends CacheState case object S_BRANCH_C extends CacheState case object S_TIP extends CacheState case object S_TIP_C extends CacheState case object S_TIP_CD extends CacheState case object S_TIP_D extends CacheState case object S_TRUNK_C extends CacheState case object S_TRUNK_CD extends CacheState class MSHR(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup val status = Valid(new MSHRStatus(params)) val schedule = Decoupled(new ScheduleRequest(params)) val sinkc = Flipped(Valid(new SinkCResponse(params))) val sinkd = Flipped(Valid(new SinkDResponse(params))) val sinke = Flipped(Valid(new SinkEResponse(params))) val nestedwb = Flipped(new NestedWriteback(params)) }) val request_valid = RegInit(false.B) val request = Reg(new FullRequest(params)) val meta_valid = RegInit(false.B) val meta = Reg(new DirectoryResult(params)) // Define which states are valid when (meta_valid) { when (meta.state === INVALID) { assert (!meta.clients.orR) assert (!meta.dirty) } when (meta.state === BRANCH) { assert (!meta.dirty) } when 
(meta.state === TRUNK) { assert (meta.clients.orR) assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one } when (meta.state === TIP) { // noop } } // Completed transitions (s_ = scheduled), (w_ = waiting) val s_rprobe = RegInit(true.B) // B val w_rprobeackfirst = RegInit(true.B) val w_rprobeacklast = RegInit(true.B) val s_release = RegInit(true.B) // CW w_rprobeackfirst val w_releaseack = RegInit(true.B) val s_pprobe = RegInit(true.B) // B val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1] val s_flush = RegInit(true.B) // X w_releaseack val w_grantfirst = RegInit(true.B) val w_grantlast = RegInit(true.B) val w_grant = RegInit(true.B) // first | last depending on wormhole val w_pprobeackfirst = RegInit(true.B) val w_pprobeacklast = RegInit(true.B) val w_pprobeack = RegInit(true.B) // first | last depending on wormhole val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*) val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD val s_execute = RegInit(true.B) // D w_pprobeack, w_grant val w_grantack = RegInit(true.B) val s_writeback = RegInit(true.B) // W w_* // [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall) // However, inB and outC are higher priority than outB, so s_release and s_pprobe // may be safely issued while blockB. Thus we must NOT try to schedule the // potentially stuck s_acquire with either of them (scheduler is all or none). // Meta-data that we discover underway val sink = Reg(UInt(params.outer.bundle.sinkBits.W)) val gotT = Reg(Bool()) val bad_grant = Reg(Bool()) val probes_done = Reg(UInt(params.clientBits.W)) val probes_toN = Reg(UInt(params.clientBits.W)) val probes_noT = Reg(Bool()) // When a nested transaction completes, update our meta data when (meta_valid && meta.state =/= INVALID && io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) { when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B } when (io.nestedwb.c_set_dirty) { meta.dirty := true.B } when (io.nestedwb.b_toB) { meta.state := BRANCH } when (io.nestedwb.b_toN) { meta.hit := false.B } } // Scheduler status io.status.valid := request_valid io.status.bits.set := request.set io.status.bits.tag := request.tag io.status.bits.way := meta.way io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst) io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst // The above rules ensure we will block and not nest an outer probe while still doing our // own inner probes. Thus every probe wakes exactly one MSHR. io.status.bits.blockC := !meta_valid io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst) // The w_grantfirst in nestC is necessary to deal with: // acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock // ... 
this is possible because the release+probe can be for same set, but different tag // We can only demand: block, nest, or queue assert (!io.status.bits.nestB || !io.status.bits.blockB) assert (!io.status.bits.nestC || !io.status.bits.blockC) // Scheduler requests val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe io.schedule.bits.b.valid := !s_rprobe || !s_pprobe io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst) io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant io.schedule.bits.e.valid := !s_grantack && w_grantfirst io.schedule.bits.x.valid := !s_flush && w_releaseack io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait) io.schedule.bits.reload := no_wait io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid || io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid || io.schedule.bits.dir.valid // Schedule completions when (io.schedule.ready) { s_rprobe := true.B when (w_rprobeackfirst) { s_release := true.B } s_pprobe := true.B when (s_release && s_pprobe) { s_acquire := true.B } when (w_releaseack) { s_flush := true.B } when (w_pprobeackfirst) { s_probeack := true.B } when (w_grantfirst) { s_grantack := true.B } when (w_pprobeack && w_grant) { s_execute := true.B } when (no_wait) { s_writeback := true.B } // Await the next operation when (no_wait) { request_valid := false.B meta_valid := false.B } } // Resulting meta-data val final_meta_writeback = WireInit(meta) val req_clientBit = params.clientBit(request.source) val req_needT = needT(request.opcode, request.param) val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm val meta_no_clients = !meta.clients.orR val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT) when (request.prio(2) && (!params.firstLevel).B) { // always a hit final_meta_writeback.dirty := meta.dirty || request.opcode(0) final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state) final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U) final_meta_writeback.hit := true.B // chained requests are hits } .elsewhen (request.control && params.control.B) { // request.prio(0) when (meta.hit) { final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := meta.clients & ~probes_toN } final_meta_writeback.hit := false.B } .otherwise { final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2) final_meta_writeback.state := Mux(req_needT, Mux(req_acquire, TRUNK, TIP), Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH), MuxLookup(meta.state, 0.U(2.W))(Seq( INVALID -> BRANCH, BRANCH -> BRANCH, TRUNK -> TIP, TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP))))) final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) | Mux(req_acquire, req_clientBit, 0.U) final_meta_writeback.tag := request.tag final_meta_writeback.hit := true.B } when (bad_grant) { when (meta.hit) { // upgrade failed (B -> T) assert (!meta_valid || meta.state === BRANCH) final_meta_writeback.hit := true.B final_meta_writeback.dirty := false.B final_meta_writeback.state := BRANCH final_meta_writeback.clients := meta.clients & ~probes_toN } .otherwise { // failed N -> (T or B) 
final_meta_writeback.hit := false.B final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := 0.U } } val invalid = Wire(new DirectoryEntry(params)) invalid.dirty := false.B invalid.state := INVALID invalid.clients := 0.U invalid.tag := 0.U // Just because a client says BtoT, by the time we process the request he may be N. // Therefore, we must consult our own meta-data state to confirm he owns the line still. val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR // The client asking us to act is proof they don't have permissions. val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U) io.schedule.bits.a.bits.tag := request.tag io.schedule.bits.a.bits.set := request.set io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB) io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U || !(request.opcode === PutFullData || request.opcode === AcquirePerm) io.schedule.bits.a.bits.source := 0.U io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB))) io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag) io.schedule.bits.b.bits.set := request.set io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release) io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN) io.schedule.bits.c.bits.source := 0.U io.schedule.bits.c.bits.tag := meta.tag io.schedule.bits.c.bits.set := request.set io.schedule.bits.c.bits.way := meta.way io.schedule.bits.c.bits.dirty := meta.dirty io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param, MuxLookup(request.param, request.param)(Seq( NtoB -> Mux(req_promoteT, NtoT, NtoB), BtoT -> Mux(honour_BtoT, BtoT, NtoT), NtoT -> NtoT))) io.schedule.bits.d.bits.sink := 0.U io.schedule.bits.d.bits.way := meta.way io.schedule.bits.d.bits.bad := bad_grant io.schedule.bits.e.bits.sink := sink io.schedule.bits.x.bits.fail := false.B io.schedule.bits.dir.bits.set := request.set io.schedule.bits.dir.bits.way := meta.way io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback)) // Coverage of state transitions def cacheState(entry: DirectoryEntry, hit: Bool) = { val out = WireDefault(0.U) val c = entry.clients.orR val d = entry.dirty switch (entry.state) { is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) } is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) } is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) } is (INVALID) { out := S_INVALID.code } } when (!hit) { out := S_INVALID.code } out } val p = !params.lastLevel // can be probed val c = !params.firstLevel // can be acquired val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read) val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist val f = params.control // flush control register exists val cfg = (p, c, m, r, f) val b = r || p // can reach branch state (via probe downgrade or read-only device) // The cache must be used for something or we would not be here require(c || m) val evict = cacheState(meta, !meta.hit) val before = cacheState(meta, meta.hit) val after = cacheState(final_meta_writeback, 
true.B) def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}") } else { assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}") } if (cover && f) { params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}") } else { assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}") } } def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}") } else { assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}") } } when ((!s_release && w_rprobeackfirst) && io.schedule.ready) { eviction(S_BRANCH, b) // MMIO read to read-only device eviction(S_BRANCH_C, b && c) // you need children to become C eviction(S_TIP, true) // MMIO read || clean release can lead to this state eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_D, true) // MMIO write || dirty release lead here eviction(S_TRUNK_C, c) // acquire for write eviction(S_TRUNK_CD, c) // dirty release then reacquire } when ((!s_writeback && no_wait) && io.schedule.ready) { transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches transition(S_INVALID, S_TIP, m) // MMIO read transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_INVALID, S_TIP_D, m) // MMIO write transition(S_INVALID, S_TRUNK_C, c) // acquire transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions) transition(S_BRANCH, S_BRANCH_C, b && c) // acquire transition(S_BRANCH, S_TIP, b && m) // prefetch write transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_TIP_D, b && m) // MMIO write transition(S_BRANCH, S_TRUNK_C, b && c) // acquire transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH_C, S_INVALID, b && c && p) transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional) transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_TIP, S_INVALID, p) transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write transition(S_TIP, 
S_TIP_CD, false) // acquire does not make us dirty immediately transition(S_TIP, S_TRUNK_C, c) // acquire transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately transition(S_TIP_C, S_INVALID, c && p) transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional) transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_TIP_C, S_TRUNK_C, c) // acquire transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty transition(S_TIP_D, S_INVALID, p) transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired transition(S_TIP_D, S_TRUNK_CD, c) // acquire transition(S_TIP_CD, S_INVALID, c && p) transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional) transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire transition(S_TIP_CD, S_TRUNK_CD, c) // acquire transition(S_TRUNK_C, S_INVALID, c && p) transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional) transition(S_TRUNK_C, S_TIP_C, c) // bounce shared transition(S_TRUNK_C, S_TIP_D, c) // dirty release transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce transition(S_TRUNK_CD, S_INVALID, c && p) transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TRUNK_CD, S_TIP_D, c) // dirty release transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire } // Handle response messages val probe_bit = params.clientBit(io.sinkc.bits.source) val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client) val probe_toN = isToN(io.sinkc.bits.param) if (!params.firstLevel) when (io.sinkc.valid) { params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B") params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B") // Caution: the probe matches us only in set. // We would never allow an outer probe to nest until both w_[rp]probeack complete, so // it is safe to just unguardedly update the probe FSM. 
probes_done := probes_done | probe_bit probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U) probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT w_rprobeackfirst := w_rprobeackfirst || last_probe w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last) w_pprobeackfirst := w_pprobeackfirst || last_probe w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last) // Allow wormhole routing from sinkC if the first request beat has offset 0 val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U) w_pprobeack := w_pprobeack || set_pprobeack params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data") params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data") // However, meta-data updates need to be done more cautiously when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!! } when (io.sinkd.valid) { when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) { sink := io.sinkd.bits.sink w_grantfirst := true.B w_grantlast := io.sinkd.bits.last // Record if we need to prevent taking ownership bad_grant := io.sinkd.bits.denied // Allow wormhole routing for requests whose first beat has offset 0 w_grant := request.offset === 0.U || io.sinkd.bits.last params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data") params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data") gotT := io.sinkd.bits.param === toT } .elsewhen (io.sinkd.bits.opcode === ReleaseAck) { w_releaseack := true.B } } when (io.sinke.valid) { w_grantack := true.B } // Bootstrap new requests val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits) val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits) val new_request = Mux(io.allocate.valid, allocate_as_full, request) val new_needT = needT(new_request.opcode, new_request.param) val new_clientBit = params.clientBit(new_request.source) val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U) val prior = cacheState(final_meta_writeback, true.B) def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}") } else { assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}") } } when (io.allocate.valid && io.allocate.bits.repeat) { bypass(S_INVALID, f || p) // Can lose permissions (probe/flush) bypass(S_BRANCH, b) // MMIO read to read-only device bypass(S_BRANCH_C, b && c) // you need children to become C bypass(S_TIP, true) // MMIO read || clean release can lead to this state bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_D, true) // MMIO write || dirty release lead here bypass(S_TRUNK_C, c) // acquire for write bypass(S_TRUNK_CD, c) // dirty release then reacquire } when (io.allocate.valid) { assert (!request_valid || (no_wait && io.schedule.fire)) request_valid := true.B request := io.allocate.bits } // Create execution plan when (io.directory.valid || (io.allocate.valid && 
io.allocate.bits.repeat)) { meta_valid := true.B meta := new_meta probes_done := 0.U probes_toN := 0.U probes_noT := false.B gotT := false.B bad_grant := false.B // These should already be either true or turning true // We clear them here explicitly to simplify the mux tree s_rprobe := true.B w_rprobeackfirst := true.B w_rprobeacklast := true.B s_release := true.B w_releaseack := true.B s_pprobe := true.B s_acquire := true.B s_flush := true.B w_grantfirst := true.B w_grantlast := true.B w_grant := true.B w_pprobeackfirst := true.B w_pprobeacklast := true.B w_pprobeack := true.B s_probeack := true.B s_grantack := true.B s_execute := true.B w_grantack := true.B s_writeback := true.B // For C channel requests (ie: Release[Data]) when (new_request.prio(2) && (!params.firstLevel).B) { s_execute := false.B // Do we need to go dirty? when (new_request.opcode(0) && !new_meta.dirty) { s_writeback := false.B } // Does our state change? when (isToB(new_request.param) && new_meta.state === TRUNK) { s_writeback := false.B } // Do our clients change? when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) { s_writeback := false.B } assert (new_meta.hit) } // For X channel requests (ie: flush) .elsewhen (new_request.control && params.control.B) { // new_request.prio(0) s_flush := false.B // Do we need to actually do something? when (new_meta.hit) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } } // For A channel requests .otherwise { // new_request.prio(0) && !new_request.control s_execute := false.B // Do we need an eviction? when (!new_meta.hit && new_meta.state =/= INVALID) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } // Do we need an acquire? when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) { s_acquire := false.B w_grantfirst := false.B w_grantlast := false.B w_grant := false.B s_grantack := false.B s_writeback := false.B } // Do we need a probe? when ((!params.firstLevel).B && (new_meta.hit && (new_needT || new_meta.state === TRUNK) && (new_meta.clients & ~new_skipProbe) =/= 0.U)) { s_pprobe := false.B w_pprobeackfirst := false.B w_pprobeacklast := false.B w_pprobeack := false.B s_writeback := false.B } // Do we need a grantack? when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) { w_grantack := false.B s_writeback := false.B } // Becomes dirty? when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) { s_writeback := false.B } } } } File Parameters.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { 
log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def 
toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if 
(isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") }
module MSHR_2( // @[MSHR.scala:84:7] input clock, // @[MSHR.scala:84:7] input reset, // @[MSHR.scala:84:7] input io_allocate_valid, // @[MSHR.scala:86:14] input io_allocate_bits_prio_0, // @[MSHR.scala:86:14] input io_allocate_bits_prio_1, // @[MSHR.scala:86:14] input io_allocate_bits_prio_2, // @[MSHR.scala:86:14] input io_allocate_bits_control, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14] input [12:0] io_allocate_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14] input [9:0] io_allocate_bits_set, // @[MSHR.scala:86:14] input io_allocate_bits_repeat, // @[MSHR.scala:86:14] input io_directory_valid, // @[MSHR.scala:86:14] input io_directory_bits_dirty, // @[MSHR.scala:86:14] input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14] input [1:0] io_directory_bits_clients, // @[MSHR.scala:86:14] input [12:0] io_directory_bits_tag, // @[MSHR.scala:86:14] input io_directory_bits_hit, // @[MSHR.scala:86:14] input [2:0] io_directory_bits_way, // @[MSHR.scala:86:14] output io_status_valid, // @[MSHR.scala:86:14] output [9:0] io_status_bits_set, // @[MSHR.scala:86:14] output [12:0] io_status_bits_tag, // @[MSHR.scala:86:14] output [2:0] io_status_bits_way, // @[MSHR.scala:86:14] output io_status_bits_blockB, // @[MSHR.scala:86:14] output io_status_bits_nestB, // @[MSHR.scala:86:14] output io_status_bits_blockC, // @[MSHR.scala:86:14] output io_status_bits_nestC, // @[MSHR.scala:86:14] input io_schedule_ready, // @[MSHR.scala:86:14] output io_schedule_valid, // @[MSHR.scala:86:14] output io_schedule_bits_a_valid, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14] output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14] output io_schedule_bits_b_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14] output [1:0] io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14] output io_schedule_bits_c_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14] output io_schedule_bits_d_valid, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_0, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14] output [5:0] 
io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14] output io_schedule_bits_e_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14] output io_schedule_bits_x_valid, // @[MSHR.scala:86:14] output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14] output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14] output [1:0] io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14] output io_schedule_bits_reload, // @[MSHR.scala:86:14] input io_sinkc_valid, // @[MSHR.scala:86:14] input io_sinkc_bits_last, // @[MSHR.scala:86:14] input [9:0] io_sinkc_bits_set, // @[MSHR.scala:86:14] input [12:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14] input io_sinkc_bits_data, // @[MSHR.scala:86:14] input io_sinkd_valid, // @[MSHR.scala:86:14] input io_sinkd_bits_last, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14] input io_sinkd_bits_denied, // @[MSHR.scala:86:14] input io_sinke_valid, // @[MSHR.scala:86:14] input [2:0] io_sinke_bits_sink, // @[MSHR.scala:86:14] input [9:0] io_nestedwb_set, // @[MSHR.scala:86:14] input [12:0] io_nestedwb_tag, // @[MSHR.scala:86:14] input io_nestedwb_b_toN, // @[MSHR.scala:86:14] input io_nestedwb_b_toB, // @[MSHR.scala:86:14] input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14] input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14] ); wire [12:0] final_meta_writeback_tag; // @[MSHR.scala:215:38] wire [1:0] final_meta_writeback_clients; // @[MSHR.scala:215:38] wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38] wire final_meta_writeback_dirty; // @[MSHR.scala:215:38] wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_0_0 = io_allocate_bits_prio_0; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7] wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7] wire [12:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7] wire [9:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7] wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7] 
wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7] wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7] wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7] wire [1:0] io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7] wire [12:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7] wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7] wire [2:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7] wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7] wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7] wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7] wire [9:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7] wire [12:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7] wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7] wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7] wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7] wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7] wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7] wire [2:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7] wire [9:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7] wire [12:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7] wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7] wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7] wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7] wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_a_bits_source = 3'h0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_source = 3'h0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_sink = 3'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7] wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68] wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80] wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21] wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137] wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11] wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137] wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11] wire _req_clientBit_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _req_clientBit_T_8 = 1'h1; // @[Parameters.scala:56:32] wire _probe_bit_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _probe_bit_T_8 = 1'h1; // @[Parameters.scala:56:32] wire _new_clientBit_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _new_clientBit_T_8 = 1'h1; // @[Parameters.scala:56:32] wire [12:0] invalid_tag = 13'h0; // @[MSHR.scala:268:21] wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21] wire [1:0] invalid_clients = 2'h0; // @[MSHR.scala:268:21] wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70] wire 
allocate_as_full_prio_0 = io_allocate_bits_prio_0_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34] wire [12:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34] wire [9:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34] wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40] wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93] wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28] wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39] wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105] wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55] wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91] wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41] wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41] wire [12:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41] wire [1:0] _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51] wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64] wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41] wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41] wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57] wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41] wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43] wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40] wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66] wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41] wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41] wire [1:0] _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41] wire [12:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41] wire no_wait; // @[MSHR.scala:183:83] wire [5:0] _probe_bit_uncommonBits_T = io_sinkc_bits_source_0; // @[Parameters.scala:52:29] wire [5:0] _probe_bit_uncommonBits_T_1 = io_sinkc_bits_source_0; // @[Parameters.scala:52:29] wire [9:0] io_status_bits_set_0; // @[MSHR.scala:84:7] wire [12:0] io_status_bits_tag_0; // @[MSHR.scala:84:7] wire [2:0] io_status_bits_way_0; // @[MSHR.scala:84:7] wire io_status_bits_blockB_0; // @[MSHR.scala:84:7] wire io_status_bits_nestB_0; // @[MSHR.scala:84:7] wire io_status_bits_blockC_0; // @[MSHR.scala:84:7] wire io_status_bits_nestC_0; // @[MSHR.scala:84:7] wire io_status_valid_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7] wire [2:0] 
io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7] wire [1:0] io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_0_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7] wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7] wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7] wire [1:0] io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7] wire io_schedule_valid_0; // @[MSHR.scala:84:7] reg request_valid; // @[MSHR.scala:97:30] assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30] reg request_prio_0; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_0_0 = request_prio_0; // @[MSHR.scala:84:7, :98:20] reg request_prio_1; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20] reg request_prio_2; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20] reg request_control; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_opcode; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_param; // @[MSHR.scala:98:20] reg [2:0] request_size; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_source; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20] wire [5:0] 
_req_clientBit_uncommonBits_T = request_source; // @[Parameters.scala:52:29] wire [5:0] _req_clientBit_uncommonBits_T_1 = request_source; // @[Parameters.scala:52:29] reg [12:0] request_tag; // @[MSHR.scala:98:20] assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_offset; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_put; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20] reg [9:0] request_set; // @[MSHR.scala:98:20] assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] reg meta_valid; // @[MSHR.scala:99:27] reg meta_dirty; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17] reg [1:0] meta_state; // @[MSHR.scala:100:17] reg [1:0] meta_clients; // @[MSHR.scala:100:17] reg [12:0] meta_tag; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17] reg meta_hit; // @[MSHR.scala:100:17] reg [2:0] meta_way; // @[MSHR.scala:100:17] assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] wire [2:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38] reg s_rprobe; // @[MSHR.scala:121:33] reg w_rprobeackfirst; // @[MSHR.scala:122:33] reg w_rprobeacklast; // @[MSHR.scala:123:33] reg s_release; // @[MSHR.scala:124:33] reg w_releaseack; // @[MSHR.scala:125:33] reg s_pprobe; // @[MSHR.scala:126:33] reg s_acquire; // @[MSHR.scala:127:33] reg s_flush; // @[MSHR.scala:128:33] reg w_grantfirst; // @[MSHR.scala:129:33] reg w_grantlast; // @[MSHR.scala:130:33] reg w_grant; // @[MSHR.scala:131:33] reg w_pprobeackfirst; // @[MSHR.scala:132:33] reg w_pprobeacklast; // @[MSHR.scala:133:33] reg w_pprobeack; // @[MSHR.scala:134:33] reg s_grantack; // @[MSHR.scala:136:33] reg s_execute; // @[MSHR.scala:137:33] reg w_grantack; // @[MSHR.scala:138:33] reg s_writeback; // @[MSHR.scala:139:33] reg [2:0] sink; // @[MSHR.scala:147:17] assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17] reg gotT; // @[MSHR.scala:148:17] reg bad_grant; // @[MSHR.scala:149:22] assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22] reg [1:0] probes_done; // @[MSHR.scala:150:24] reg [1:0] probes_toN; // @[MSHR.scala:151:23] reg probes_noT; // @[MSHR.scala:152:23] wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28] wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45] wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62] wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | 
_io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}] wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82] wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}] wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103] wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}] assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}] assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40] wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39] wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}] wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}] wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96] assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}] assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93] assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28] assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28] wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43] wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64] wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}] wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85] wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}] assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}] assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39] wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33] wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}] wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}] assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}] assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83] wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31] wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}] assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}] assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55] wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31] wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44] assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}] assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41] wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32] wire _io_schedule_bits_c_valid_T_1 = 
_io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}] assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}] assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64] wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31] wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}] assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}] assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57] wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31] assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}] assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43] wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31] assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}] assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40] wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34] wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}] wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70] wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}] assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}] assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66] wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49] wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}] wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}] wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49] wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}] assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}] assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105] wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71] wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71] wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71] wire [12:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71] wire final_meta_writeback_hit; // @[MSHR.scala:215:38] wire [1:0] req_clientBit_uncommonBits = _req_clientBit_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _req_clientBit_T = request_source[5:2]; // @[Parameters.scala:54:10] wire [3:0] _req_clientBit_T_6 = request_source[5:2]; // @[Parameters.scala:54:10] 
wire _req_clientBit_T_1 = _req_clientBit_T == 4'hA; // @[Parameters.scala:54:{10,32}] wire _req_clientBit_T_3 = _req_clientBit_T_1; // @[Parameters.scala:54:{32,67}] wire _req_clientBit_T_4 = req_clientBit_uncommonBits != 2'h3; // @[Parameters.scala:52:56, :57:20] wire _req_clientBit_T_5 = _req_clientBit_T_3 & _req_clientBit_T_4; // @[Parameters.scala:54:67, :56:48, :57:20] wire [1:0] req_clientBit_uncommonBits_1 = _req_clientBit_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _req_clientBit_T_7 = _req_clientBit_T_6 == 4'h8; // @[Parameters.scala:54:{10,32}] wire _req_clientBit_T_9 = _req_clientBit_T_7; // @[Parameters.scala:54:{32,67}] wire _req_clientBit_T_10 = req_clientBit_uncommonBits_1 != 2'h3; // @[Parameters.scala:52:56, :57:20] wire _req_clientBit_T_11 = _req_clientBit_T_9 & _req_clientBit_T_10; // @[Parameters.scala:54:67, :56:48, :57:20] wire [1:0] req_clientBit = {_req_clientBit_T_11, _req_clientBit_T_5}; // @[Parameters.scala:56:48] wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12] wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12] wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _req_needT_T_2; // @[Parameters.scala:270:13] assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13] wire _excluded_client_T_6; // @[Parameters.scala:279:117] assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117] wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42] wire _req_needT_T_3; // @[Parameters.scala:270:42] assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42] wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11] assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11] wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42] wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _req_needT_T_6; // @[Parameters.scala:271:14] assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14] wire _req_acquire_T; // @[MSHR.scala:219:36] assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14] wire _excluded_client_T_1; // @[Parameters.scala:279:12] assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12] wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52] wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89] wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52] wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}] wire _meta_no_clients_T = |meta_clients; // @[MSHR.scala:100:17, :220:39] wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}] wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81] wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}] wire _req_promoteT_T_2 = meta_hit ? 
_req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}] wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}] wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65] wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}] wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55] wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78] wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78] assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78] wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70] wire _evict_T_2; // @[MSHR.scala:317:26] assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _before_T_1; // @[MSHR.scala:317:26] assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}] wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}] wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43] assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43] wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}] wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75] wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}] wire [1:0] _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 ? req_clientBit : 2'h0; // @[Parameters.scala:201:10, :282:66] wire [1:0] _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}] wire [1:0] _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}] wire [1:0] _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54] wire [1:0] _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}] wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45] wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}] wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}] wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40] wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40] assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40] wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65] assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65] wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41] wire [1:0] _final_meta_writeback_state_T_7 = gotT ? 
_final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}] wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72] wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}] wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70] wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70] wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53] assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53] wire _evict_T_1; // @[MSHR.scala:317:26] assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire _before_T; // @[MSHR.scala:317:26] assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70] wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70] wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55] wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? _final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70] wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70] wire [1:0] _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66] wire [1:0] _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}] wire [1:0] _final_meta_writeback_clients_T_12 = meta_hit ? _final_meta_writeback_clients_T_11 : 2'h0; // @[MSHR.scala:100:17, :245:{40,64}] wire [1:0] _final_meta_writeback_clients_T_13 = req_acquire ? req_clientBit : 2'h0; // @[Parameters.scala:201:10] wire [1:0] _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40] assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30] wire [1:0] _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54] wire [1:0] _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}] assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21] assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21] assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 
2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36] assign final_meta_writeback_clients = bad_grant ? (meta_hit ? _final_meta_writeback_clients_T_16 : 2'h0) : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36] wire [1:0] _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:201:10] wire _honour_BtoT_T_1 = |_honour_BtoT_T; // @[MSHR.scala:276:{47,64}] wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}] wire _excluded_client_T = meta_hit & request_prio_0; // @[MSHR.scala:98:20, :100:17, :279:38] wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50] wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}] wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}] wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}] wire _excluded_client_T_9 = _excluded_client_T & _excluded_client_T_8; // @[Parameters.scala:279:106] wire [1:0] excluded_client = _excluded_client_T_9 ? req_clientBit : 2'h0; // @[Parameters.scala:201:10] wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56] wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70] assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}] wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51] wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55] wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52] wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}] wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}] assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38] assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91] wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42] wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70] wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}] assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 
3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}] assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41] wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42] assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}] assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41] wire [1:0] _io_schedule_bits_b_bits_clients_T = ~excluded_client; // @[MSHR.scala:279:28, :289:53] assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients & _io_schedule_bits_b_bits_clients_T; // @[MSHR.scala:100:17, :289:{51,53}] assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51] assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41] assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41] assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}] assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41] wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42] wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53] wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53] wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89] wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53] wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53] wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79] assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41] wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42] assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_clients = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 
13'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}] assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41] wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32] wire [3:0] evict; // @[MSHR.scala:314:26] wire evict_c = |meta_clients; // @[MSHR.scala:100:17, :220:39, :315:27] wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32] wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32] wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32] assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32] assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39] wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39] assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39] assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76] wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76] assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76] assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32] assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] before_0; // @[MSHR.scala:314:26] wire before_c = |meta_clients; // @[MSHR.scala:100:17, :220:39, :315:27] wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32] wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11] assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? 
{1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] after; // @[MSHR.scala:314:26] wire after_c = |final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26] wire _after_T; // @[MSHR.scala:317:26] assign _after_T = _GEN_9; // @[MSHR.scala:317:26] wire _prior_T; // @[MSHR.scala:317:26] assign _prior_T = _GEN_9; // @[MSHR.scala:317:26] wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32] wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26] wire _after_T_1; // @[MSHR.scala:317:26] assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire _prior_T_1; // @[MSHR.scala:317:26] assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32] wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32] assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32] assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39] wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39] assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39] assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76] wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76] assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76] assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26] wire _after_T_3; // @[MSHR.scala:317:26] assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26] wire _prior_T_3; // @[MSHR.scala:317:26] assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26] assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? 
{1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire [1:0] probe_bit_uncommonBits = _probe_bit_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _probe_bit_T = io_sinkc_bits_source_0[5:2]; // @[Parameters.scala:54:10] wire [3:0] _probe_bit_T_6 = io_sinkc_bits_source_0[5:2]; // @[Parameters.scala:54:10] wire _probe_bit_T_1 = _probe_bit_T == 4'hA; // @[Parameters.scala:54:{10,32}] wire _probe_bit_T_3 = _probe_bit_T_1; // @[Parameters.scala:54:{32,67}] wire _probe_bit_T_4 = probe_bit_uncommonBits != 2'h3; // @[Parameters.scala:52:56, :57:20] wire _probe_bit_T_5 = _probe_bit_T_3 & _probe_bit_T_4; // @[Parameters.scala:54:67, :56:48, :57:20] wire [1:0] probe_bit_uncommonBits_1 = _probe_bit_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _probe_bit_T_7 = _probe_bit_T_6 == 4'h8; // @[Parameters.scala:54:{10,32}] wire _probe_bit_T_9 = _probe_bit_T_7; // @[Parameters.scala:54:{32,67}] wire _probe_bit_T_10 = probe_bit_uncommonBits_1 != 2'h3; // @[Parameters.scala:52:56, :57:20] wire _probe_bit_T_11 = _probe_bit_T_9 & _probe_bit_T_10; // @[Parameters.scala:54:67, :56:48, :57:20] wire [1:0] probe_bit = {_probe_bit_T_11, _probe_bit_T_5}; // @[Parameters.scala:56:48] wire [1:0] _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:201:10] wire [1:0] _last_probe_T; // @[MSHR.scala:459:33] assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33] wire [1:0] _probes_done_T; // @[MSHR.scala:467:32] assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32] wire [1:0] _last_probe_T_1 = ~excluded_client; // @[MSHR.scala:279:28, :289:53, :459:66] wire [1:0] _last_probe_T_2 = meta_clients & _last_probe_T_1; // @[MSHR.scala:100:17, :459:{64,66}] wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}] wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11] wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43] wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}] wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75] wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}] wire [1:0] _probes_toN_T = probe_toN ? 
probe_bit : 2'h0; // @[Parameters.scala:201:10, :282:66] wire [1:0] _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}] wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53] wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}] wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42] wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55] wire _w_rprobeacklast_T; // @[MSHR.scala:471:55] assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55] wire _w_pprobeacklast_T; // @[MSHR.scala:473:55] assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55] wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}] wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42] wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}] wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77] wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}] wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}] wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32] wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33] wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}] wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35] wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40] wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [1:0] new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [12:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [2:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_request_prio_0 = io_allocate_valid_0 ? allocate_as_full_prio_0 : request_prio_0; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_size = io_allocate_valid_0 ? 
allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [12:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [9:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] _new_clientBit_uncommonBits_T = new_request_source; // @[Parameters.scala:52:29] wire [5:0] _new_clientBit_uncommonBits_T_1 = new_request_source; // @[Parameters.scala:52:29] wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12] wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _new_needT_T_2; // @[Parameters.scala:270:13] assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13] wire _new_skipProbe_T_5; // @[Parameters.scala:279:117] assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117] wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42] wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _new_needT_T_6; // @[Parameters.scala:271:14] assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14] wire _new_skipProbe_T; // @[Parameters.scala:279:12] assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12] wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52] wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89] wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire [1:0] new_clientBit_uncommonBits = _new_clientBit_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _new_clientBit_T = new_request_source[5:2]; // @[Parameters.scala:54:10] wire [3:0] _new_clientBit_T_6 = new_request_source[5:2]; // @[Parameters.scala:54:10] wire _new_clientBit_T_1 = _new_clientBit_T == 4'hA; // @[Parameters.scala:54:{10,32}] wire _new_clientBit_T_3 = _new_clientBit_T_1; // @[Parameters.scala:54:{32,67}] wire _new_clientBit_T_4 = new_clientBit_uncommonBits != 2'h3; // @[Parameters.scala:52:56, :57:20] wire _new_clientBit_T_5 = _new_clientBit_T_3 & _new_clientBit_T_4; // @[Parameters.scala:54:67, :56:48, :57:20] wire [1:0] new_clientBit_uncommonBits_1 = _new_clientBit_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _new_clientBit_T_7 = _new_clientBit_T_6 == 4'h8; // @[Parameters.scala:54:{10,32}] wire _new_clientBit_T_9 = _new_clientBit_T_7; // @[Parameters.scala:54:{32,67}] wire _new_clientBit_T_10 = new_clientBit_uncommonBits_1 != 2'h3; // @[Parameters.scala:52:56, :57:20] wire _new_clientBit_T_11 = _new_clientBit_T_9 & _new_clientBit_T_10; // 
@[Parameters.scala:54:67, :56:48, :57:20] wire [1:0] new_clientBit = {_new_clientBit_T_11, _new_clientBit_T_5}; // @[Parameters.scala:56:48] wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50] wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}] wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}] wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}] wire [1:0] new_skipProbe = _new_skipProbe_T_7 ? new_clientBit : 2'h0; // @[Parameters.scala:201:10, :279:106] wire [3:0] prior; // @[MSHR.scala:314:26] wire prior_c = |final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32] wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
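The MSHR response above repeatedly derives a one-hot client bit from a TileLink source ID (req_clientBit, probe_bit and new_clientBit all compare source[5:2] against 4'hA or 4'h8 and guard with uncommonBits != 2'h3, per Parameters.scala:52-57). A minimal software sketch of that decode, assuming the ID ranges implied by those generated compares; the real mapping comes from the design's client parameters, which are not shown here:

// Minimal sketch of the client-bit decode seen above (req_clientBit,
// probe_bit, new_clientBit). The ranges are read off the generated
// compares -- source[5:2] == 0xA or 0x8 with source[1:0] != 3 -- and are
// an assumption about this particular configuration, not a general rule.
def clientBit(source: Int): Int = {
  val hi = (source >> 2) & 0xF                  // source[5:2]
  val lo = source & 0x3                         // uncommonBits = source[1:0]
  val b0 = if (hi == 0xA && lo != 3) 1 else 0   // _..._clientBit_T_5
  val b1 = if (hi == 0x8 && lo != 3) 1 else 0   // _..._clientBit_T_11
  (b1 << 1) | b0                                // {T_11, T_5}
}

Under that reading, sources 0x28-0x2A set bit 0 and sources 0x20-0x22 set bit 1; all other IDs decode to 2'h0, matching the excluded-client and skipProbe muxes above.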
Generate the Verilog code corresponding to the following Chisel files. File INToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import consts._ class INToRecFN(intWidth: Int, expWidth: Int, sigWidth: Int) extends RawModule { override def desiredName = s"INToRecFN_i${intWidth}_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val signedIn = Input(Bool()) val in = Input(Bits(intWidth.W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val intAsRawFloat = rawFloatFromIN(io.signedIn, io.in); val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( intAsRawFloat.expWidth, intWidth, expWidth, sigWidth, flRoundOpt_sigMSBitAlwaysZero | flRoundOpt_neverUnderflows )) roundAnyRawFNToRecFN.io.invalidExc := false.B roundAnyRawFNToRecFN.io.infiniteExc := false.B roundAnyRawFNToRecFN.io.in := intAsRawFloat roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags } File rawFloatFromIN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). 
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ object rawFloatFromIN { def apply(signedIn: Bool, in: Bits): RawFloat = { val expWidth = log2Up(in.getWidth) + 1 //*** CHANGE THIS; CAN BE VERY LARGE: val extIntWidth = 1<<(expWidth - 1) val sign = signedIn && in(in.getWidth - 1) val absIn = Mux(sign, -in.asUInt, in.asUInt) val extAbsIn = (0.U(extIntWidth.W) ## absIn)(extIntWidth - 1, 0) val adjustedNormDist = countLeadingZeros(extAbsIn) val sig = (extAbsIn<<adjustedNormDist)( extIntWidth - 1, extIntWidth - in.getWidth) val out = Wire(new RawFloat(expWidth, in.getWidth)) out.isNaN := false.B out.isInf := false.B out.isZero := ! sig(in.getWidth - 1) out.sign := sign out.sExp := (2.U(2.W) ## ~adjustedNormDist(expWidth - 2, 0)).zext out.sig := sig out } }
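For the 64-bit instance that follows, rawFloatFromIN works out to expWidth = log2Up(64) + 1 = 7, adjustedNormDist is the leading-zero count of the zero-extended absolute value, and sExp is formed as (2.U(2.W) ## ~adjustedNormDist(5, 0)).zext. A minimal software model of that arithmetic (plain Scala, illustrative only; RawFromLongModel and its tuple return are assumptions, not part of the HardFloat sources):

// Illustrative software model of rawFloatFromIN for a 64-bit input.
// Not part of the generated RTL; it only mirrors the arithmetic above.
object RawFromLongModel {
  def apply(signedIn: Boolean, in: Long): (Boolean, Int, BigInt) = {
    val mask64 = (BigInt(1) << 64) - 1
    val sign   = signedIn && in < 0
    val absIn  = BigInt(if (sign) -in else in) & mask64   // two's-complement abs, 64 bits
    val normDist =                                         // leading zeros, saturating at 63
      if (absIn == 0) 63 else 64 - absIn.bitLength
    val sig  = (absIn << normDist) & mask64                // MSB-aligned significand
    val sExp = (2 << 6) | (~normDist & 0x3F)               // 2'b10 ## ~normDist(5,0)
    (sign, sExp, sig)
  }
}

For example, in = 1 with signedIn = true gives normDist = 63, sig = 1 << 63 and sExp = 0x80, which is what the priority-encoder Mux chain in the generated module below produces for the same input.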
module INToRecFN_i64_e8_s24_4( // @[INToRecFN.scala:43:7] input io_signedIn, // @[INToRecFN.scala:46:16] input [63:0] io_in, // @[INToRecFN.scala:46:16] input [2:0] io_roundingMode, // @[INToRecFN.scala:46:16] output [32:0] io_out, // @[INToRecFN.scala:46:16] output [4:0] io_exceptionFlags // @[INToRecFN.scala:46:16] ); wire io_signedIn_0 = io_signedIn; // @[INToRecFN.scala:43:7] wire [63:0] io_in_0 = io_in; // @[INToRecFN.scala:43:7] wire [2:0] io_roundingMode_0 = io_roundingMode; // @[INToRecFN.scala:43:7] wire intAsRawFloat_isNaN = 1'h0; // @[rawFloatFromIN.scala:59:23] wire intAsRawFloat_isInf = 1'h0; // @[rawFloatFromIN.scala:59:23] wire io_detectTininess = 1'h1; // @[INToRecFN.scala:43:7] wire [32:0] io_out_0; // @[INToRecFN.scala:43:7] wire [4:0] io_exceptionFlags_0; // @[INToRecFN.scala:43:7] wire _intAsRawFloat_sign_T = io_in_0[63]; // @[rawFloatFromIN.scala:51:34] wire intAsRawFloat_sign = io_signedIn_0 & _intAsRawFloat_sign_T; // @[rawFloatFromIN.scala:51:{29,34}] wire intAsRawFloat_sign_0 = intAsRawFloat_sign; // @[rawFloatFromIN.scala:51:29, :59:23] wire [64:0] _intAsRawFloat_absIn_T = 65'h0 - {1'h0, io_in_0}; // @[rawFloatFromIN.scala:52:31] wire [63:0] _intAsRawFloat_absIn_T_1 = _intAsRawFloat_absIn_T[63:0]; // @[rawFloatFromIN.scala:52:31] wire [63:0] intAsRawFloat_absIn = intAsRawFloat_sign ? _intAsRawFloat_absIn_T_1 : io_in_0; // @[rawFloatFromIN.scala:51:29, :52:{24,31}] wire [127:0] _intAsRawFloat_extAbsIn_T = {64'h0, intAsRawFloat_absIn}; // @[rawFloatFromIN.scala:52:24, :53:44] wire [63:0] intAsRawFloat_extAbsIn = _intAsRawFloat_extAbsIn_T[63:0]; // @[rawFloatFromIN.scala:53:{44,53}] wire _intAsRawFloat_adjustedNormDist_T = intAsRawFloat_extAbsIn[0]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_1 = intAsRawFloat_extAbsIn[1]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_2 = intAsRawFloat_extAbsIn[2]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_3 = intAsRawFloat_extAbsIn[3]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_4 = intAsRawFloat_extAbsIn[4]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_5 = intAsRawFloat_extAbsIn[5]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_6 = intAsRawFloat_extAbsIn[6]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_7 = intAsRawFloat_extAbsIn[7]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_8 = intAsRawFloat_extAbsIn[8]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_9 = intAsRawFloat_extAbsIn[9]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_10 = intAsRawFloat_extAbsIn[10]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_11 = intAsRawFloat_extAbsIn[11]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_12 = intAsRawFloat_extAbsIn[12]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_13 = intAsRawFloat_extAbsIn[13]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_14 = intAsRawFloat_extAbsIn[14]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_15 = intAsRawFloat_extAbsIn[15]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_16 = intAsRawFloat_extAbsIn[16]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_17 = intAsRawFloat_extAbsIn[17]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_18 = 
intAsRawFloat_extAbsIn[18]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_19 = intAsRawFloat_extAbsIn[19]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_20 = intAsRawFloat_extAbsIn[20]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_21 = intAsRawFloat_extAbsIn[21]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_22 = intAsRawFloat_extAbsIn[22]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_23 = intAsRawFloat_extAbsIn[23]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_24 = intAsRawFloat_extAbsIn[24]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_25 = intAsRawFloat_extAbsIn[25]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_26 = intAsRawFloat_extAbsIn[26]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_27 = intAsRawFloat_extAbsIn[27]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_28 = intAsRawFloat_extAbsIn[28]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_29 = intAsRawFloat_extAbsIn[29]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_30 = intAsRawFloat_extAbsIn[30]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_31 = intAsRawFloat_extAbsIn[31]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_32 = intAsRawFloat_extAbsIn[32]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_33 = intAsRawFloat_extAbsIn[33]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_34 = intAsRawFloat_extAbsIn[34]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_35 = intAsRawFloat_extAbsIn[35]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_36 = intAsRawFloat_extAbsIn[36]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_37 = intAsRawFloat_extAbsIn[37]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_38 = intAsRawFloat_extAbsIn[38]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_39 = intAsRawFloat_extAbsIn[39]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_40 = intAsRawFloat_extAbsIn[40]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_41 = intAsRawFloat_extAbsIn[41]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_42 = intAsRawFloat_extAbsIn[42]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_43 = intAsRawFloat_extAbsIn[43]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_44 = intAsRawFloat_extAbsIn[44]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_45 = intAsRawFloat_extAbsIn[45]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_46 = intAsRawFloat_extAbsIn[46]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_47 = intAsRawFloat_extAbsIn[47]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_48 = intAsRawFloat_extAbsIn[48]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_49 = intAsRawFloat_extAbsIn[49]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_50 = intAsRawFloat_extAbsIn[50]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_51 = intAsRawFloat_extAbsIn[51]; // @[rawFloatFromIN.scala:53:53] wire 
_intAsRawFloat_adjustedNormDist_T_52 = intAsRawFloat_extAbsIn[52]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_53 = intAsRawFloat_extAbsIn[53]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_54 = intAsRawFloat_extAbsIn[54]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_55 = intAsRawFloat_extAbsIn[55]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_56 = intAsRawFloat_extAbsIn[56]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_57 = intAsRawFloat_extAbsIn[57]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_58 = intAsRawFloat_extAbsIn[58]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_59 = intAsRawFloat_extAbsIn[59]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_60 = intAsRawFloat_extAbsIn[60]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_61 = intAsRawFloat_extAbsIn[61]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_62 = intAsRawFloat_extAbsIn[62]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_63 = intAsRawFloat_extAbsIn[63]; // @[rawFloatFromIN.scala:53:53] wire [5:0] _intAsRawFloat_adjustedNormDist_T_64 = {5'h1F, ~_intAsRawFloat_adjustedNormDist_T_1}; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_65 = _intAsRawFloat_adjustedNormDist_T_2 ? 6'h3D : _intAsRawFloat_adjustedNormDist_T_64; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_66 = _intAsRawFloat_adjustedNormDist_T_3 ? 6'h3C : _intAsRawFloat_adjustedNormDist_T_65; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_67 = _intAsRawFloat_adjustedNormDist_T_4 ? 6'h3B : _intAsRawFloat_adjustedNormDist_T_66; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_68 = _intAsRawFloat_adjustedNormDist_T_5 ? 6'h3A : _intAsRawFloat_adjustedNormDist_T_67; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_69 = _intAsRawFloat_adjustedNormDist_T_6 ? 6'h39 : _intAsRawFloat_adjustedNormDist_T_68; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_70 = _intAsRawFloat_adjustedNormDist_T_7 ? 6'h38 : _intAsRawFloat_adjustedNormDist_T_69; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_71 = _intAsRawFloat_adjustedNormDist_T_8 ? 6'h37 : _intAsRawFloat_adjustedNormDist_T_70; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_72 = _intAsRawFloat_adjustedNormDist_T_9 ? 6'h36 : _intAsRawFloat_adjustedNormDist_T_71; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_73 = _intAsRawFloat_adjustedNormDist_T_10 ? 6'h35 : _intAsRawFloat_adjustedNormDist_T_72; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_74 = _intAsRawFloat_adjustedNormDist_T_11 ? 6'h34 : _intAsRawFloat_adjustedNormDist_T_73; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_75 = _intAsRawFloat_adjustedNormDist_T_12 ? 6'h33 : _intAsRawFloat_adjustedNormDist_T_74; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_76 = _intAsRawFloat_adjustedNormDist_T_13 ? 6'h32 : _intAsRawFloat_adjustedNormDist_T_75; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_77 = _intAsRawFloat_adjustedNormDist_T_14 ? 6'h31 : _intAsRawFloat_adjustedNormDist_T_76; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_78 = _intAsRawFloat_adjustedNormDist_T_15 ? 
6'h30 : _intAsRawFloat_adjustedNormDist_T_77; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_79 = _intAsRawFloat_adjustedNormDist_T_16 ? 6'h2F : _intAsRawFloat_adjustedNormDist_T_78; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_80 = _intAsRawFloat_adjustedNormDist_T_17 ? 6'h2E : _intAsRawFloat_adjustedNormDist_T_79; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_81 = _intAsRawFloat_adjustedNormDist_T_18 ? 6'h2D : _intAsRawFloat_adjustedNormDist_T_80; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_82 = _intAsRawFloat_adjustedNormDist_T_19 ? 6'h2C : _intAsRawFloat_adjustedNormDist_T_81; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_83 = _intAsRawFloat_adjustedNormDist_T_20 ? 6'h2B : _intAsRawFloat_adjustedNormDist_T_82; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_84 = _intAsRawFloat_adjustedNormDist_T_21 ? 6'h2A : _intAsRawFloat_adjustedNormDist_T_83; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_85 = _intAsRawFloat_adjustedNormDist_T_22 ? 6'h29 : _intAsRawFloat_adjustedNormDist_T_84; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_86 = _intAsRawFloat_adjustedNormDist_T_23 ? 6'h28 : _intAsRawFloat_adjustedNormDist_T_85; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_87 = _intAsRawFloat_adjustedNormDist_T_24 ? 6'h27 : _intAsRawFloat_adjustedNormDist_T_86; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_88 = _intAsRawFloat_adjustedNormDist_T_25 ? 6'h26 : _intAsRawFloat_adjustedNormDist_T_87; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_89 = _intAsRawFloat_adjustedNormDist_T_26 ? 6'h25 : _intAsRawFloat_adjustedNormDist_T_88; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_90 = _intAsRawFloat_adjustedNormDist_T_27 ? 6'h24 : _intAsRawFloat_adjustedNormDist_T_89; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_91 = _intAsRawFloat_adjustedNormDist_T_28 ? 6'h23 : _intAsRawFloat_adjustedNormDist_T_90; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_92 = _intAsRawFloat_adjustedNormDist_T_29 ? 6'h22 : _intAsRawFloat_adjustedNormDist_T_91; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_93 = _intAsRawFloat_adjustedNormDist_T_30 ? 6'h21 : _intAsRawFloat_adjustedNormDist_T_92; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_94 = _intAsRawFloat_adjustedNormDist_T_31 ? 6'h20 : _intAsRawFloat_adjustedNormDist_T_93; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_95 = _intAsRawFloat_adjustedNormDist_T_32 ? 6'h1F : _intAsRawFloat_adjustedNormDist_T_94; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_96 = _intAsRawFloat_adjustedNormDist_T_33 ? 6'h1E : _intAsRawFloat_adjustedNormDist_T_95; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_97 = _intAsRawFloat_adjustedNormDist_T_34 ? 6'h1D : _intAsRawFloat_adjustedNormDist_T_96; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_98 = _intAsRawFloat_adjustedNormDist_T_35 ? 6'h1C : _intAsRawFloat_adjustedNormDist_T_97; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_99 = _intAsRawFloat_adjustedNormDist_T_36 ? 6'h1B : _intAsRawFloat_adjustedNormDist_T_98; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_100 = _intAsRawFloat_adjustedNormDist_T_37 ? 
6'h1A : _intAsRawFloat_adjustedNormDist_T_99; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_101 = _intAsRawFloat_adjustedNormDist_T_38 ? 6'h19 : _intAsRawFloat_adjustedNormDist_T_100; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_102 = _intAsRawFloat_adjustedNormDist_T_39 ? 6'h18 : _intAsRawFloat_adjustedNormDist_T_101; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_103 = _intAsRawFloat_adjustedNormDist_T_40 ? 6'h17 : _intAsRawFloat_adjustedNormDist_T_102; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_104 = _intAsRawFloat_adjustedNormDist_T_41 ? 6'h16 : _intAsRawFloat_adjustedNormDist_T_103; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_105 = _intAsRawFloat_adjustedNormDist_T_42 ? 6'h15 : _intAsRawFloat_adjustedNormDist_T_104; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_106 = _intAsRawFloat_adjustedNormDist_T_43 ? 6'h14 : _intAsRawFloat_adjustedNormDist_T_105; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_107 = _intAsRawFloat_adjustedNormDist_T_44 ? 6'h13 : _intAsRawFloat_adjustedNormDist_T_106; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_108 = _intAsRawFloat_adjustedNormDist_T_45 ? 6'h12 : _intAsRawFloat_adjustedNormDist_T_107; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_109 = _intAsRawFloat_adjustedNormDist_T_46 ? 6'h11 : _intAsRawFloat_adjustedNormDist_T_108; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_110 = _intAsRawFloat_adjustedNormDist_T_47 ? 6'h10 : _intAsRawFloat_adjustedNormDist_T_109; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_111 = _intAsRawFloat_adjustedNormDist_T_48 ? 6'hF : _intAsRawFloat_adjustedNormDist_T_110; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_112 = _intAsRawFloat_adjustedNormDist_T_49 ? 6'hE : _intAsRawFloat_adjustedNormDist_T_111; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_113 = _intAsRawFloat_adjustedNormDist_T_50 ? 6'hD : _intAsRawFloat_adjustedNormDist_T_112; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_114 = _intAsRawFloat_adjustedNormDist_T_51 ? 6'hC : _intAsRawFloat_adjustedNormDist_T_113; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_115 = _intAsRawFloat_adjustedNormDist_T_52 ? 6'hB : _intAsRawFloat_adjustedNormDist_T_114; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_116 = _intAsRawFloat_adjustedNormDist_T_53 ? 6'hA : _intAsRawFloat_adjustedNormDist_T_115; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_117 = _intAsRawFloat_adjustedNormDist_T_54 ? 6'h9 : _intAsRawFloat_adjustedNormDist_T_116; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_118 = _intAsRawFloat_adjustedNormDist_T_55 ? 6'h8 : _intAsRawFloat_adjustedNormDist_T_117; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_119 = _intAsRawFloat_adjustedNormDist_T_56 ? 6'h7 : _intAsRawFloat_adjustedNormDist_T_118; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_120 = _intAsRawFloat_adjustedNormDist_T_57 ? 6'h6 : _intAsRawFloat_adjustedNormDist_T_119; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_121 = _intAsRawFloat_adjustedNormDist_T_58 ? 6'h5 : _intAsRawFloat_adjustedNormDist_T_120; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_122 = _intAsRawFloat_adjustedNormDist_T_59 ? 
6'h4 : _intAsRawFloat_adjustedNormDist_T_121; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_123 = _intAsRawFloat_adjustedNormDist_T_60 ? 6'h3 : _intAsRawFloat_adjustedNormDist_T_122; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_124 = _intAsRawFloat_adjustedNormDist_T_61 ? 6'h2 : _intAsRawFloat_adjustedNormDist_T_123; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_125 = _intAsRawFloat_adjustedNormDist_T_62 ? 6'h1 : _intAsRawFloat_adjustedNormDist_T_124; // @[Mux.scala:50:70] wire [5:0] intAsRawFloat_adjustedNormDist = _intAsRawFloat_adjustedNormDist_T_63 ? 6'h0 : _intAsRawFloat_adjustedNormDist_T_125; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_out_sExp_T = intAsRawFloat_adjustedNormDist; // @[Mux.scala:50:70] wire [126:0] _intAsRawFloat_sig_T = {63'h0, intAsRawFloat_extAbsIn} << intAsRawFloat_adjustedNormDist; // @[Mux.scala:50:70] wire [63:0] intAsRawFloat_sig = _intAsRawFloat_sig_T[63:0]; // @[rawFloatFromIN.scala:56:{22,41}] wire _intAsRawFloat_out_isZero_T_1; // @[rawFloatFromIN.scala:62:23] wire [8:0] _intAsRawFloat_out_sExp_T_3; // @[rawFloatFromIN.scala:64:72] wire intAsRawFloat_isZero; // @[rawFloatFromIN.scala:59:23] wire [8:0] intAsRawFloat_sExp; // @[rawFloatFromIN.scala:59:23] wire [64:0] intAsRawFloat_sig_0; // @[rawFloatFromIN.scala:59:23] wire _intAsRawFloat_out_isZero_T = intAsRawFloat_sig[63]; // @[rawFloatFromIN.scala:56:41, :62:28] assign _intAsRawFloat_out_isZero_T_1 = ~_intAsRawFloat_out_isZero_T; // @[rawFloatFromIN.scala:62:{23,28}] assign intAsRawFloat_isZero = _intAsRawFloat_out_isZero_T_1; // @[rawFloatFromIN.scala:59:23, :62:23] wire [5:0] _intAsRawFloat_out_sExp_T_1 = ~_intAsRawFloat_out_sExp_T; // @[rawFloatFromIN.scala:64:{36,53}] wire [7:0] _intAsRawFloat_out_sExp_T_2 = {2'h2, _intAsRawFloat_out_sExp_T_1}; // @[rawFloatFromIN.scala:64:{33,36}] assign _intAsRawFloat_out_sExp_T_3 = {1'h0, _intAsRawFloat_out_sExp_T_2}; // @[rawFloatFromIN.scala:64:{33,72}] assign intAsRawFloat_sExp = _intAsRawFloat_out_sExp_T_3; // @[rawFloatFromIN.scala:59:23, :64:72] assign intAsRawFloat_sig_0 = {1'h0, intAsRawFloat_sig}; // @[rawFloatFromIN.scala:56:41, :59:23, :65:20] RoundAnyRawFNToRecFN_ie7_is64_oe8_os24_4 roundAnyRawFNToRecFN ( // @[INToRecFN.scala:60:15] .io_in_isZero (intAsRawFloat_isZero), // @[rawFloatFromIN.scala:59:23] .io_in_sign (intAsRawFloat_sign_0), // @[rawFloatFromIN.scala:59:23] .io_in_sExp (intAsRawFloat_sExp), // @[rawFloatFromIN.scala:59:23] .io_in_sig (intAsRawFloat_sig_0), // @[rawFloatFromIN.scala:59:23] .io_roundingMode (io_roundingMode_0), // @[INToRecFN.scala:43:7] .io_out (io_out_0), .io_exceptionFlags (io_exceptionFlags_0) ); // @[INToRecFN.scala:60:15] assign io_out = io_out_0; // @[INToRecFN.scala:43:7] assign io_exceptionFlags = io_exceptionFlags_0; // @[INToRecFN.scala:43:7] endmodule
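The INToRecFN_i64_e8_s24_4 module above is the elaborated HardFloat integer-to-recoded-float converter for a 64-bit input and an e8/s24 (single-precision recoded) result: it extracts the sign, negates to obtain the magnitude, counts leading zeros with the long priority-mux chain over extAbsIn, normalises the significand, and hands the raw float to the rounding stage. A minimal plain-Scala model of that rawFloatFromIN step (illustrative names only, not part of the generated RTL or of the HardFloat sources) might look like:

def rawFloatFromInt64(in: BigInt, signedIn: Boolean): (Boolean, Boolean, Int, BigInt) = {
  val mask64 = (BigInt(1) << 64) - 1
  val sign   = signedIn && in.testBit(63)            // io_signedIn & io_in[63]
  val absIn  = (if (sign) -in else in) & mask64      // two's-complement magnitude
  // adjustedNormDist: 63 minus the index of the highest set bit (63 when absIn is 0 or 1);
  // this is what the priority-mux chain over extAbsIn[63:1] computes.
  val normDist = if (absIn == 0) 63 else 64 - absIn.bitLength
  val sig    = (absIn << normDist) & mask64          // normalised so bit 63 holds the leading 1
  val isZero = !sig.testBit(63)
  val sExp   = 191 - normDist                        // mirrors {2'b10, ~normDist} in the RTL
  (isZero, sign, sExp, sig)                          // raw-float fields fed to the rounding module
}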
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
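One detail of the Arithmetic.scala file above worth calling out is the rounding right-shift it defines for UInt/SInt: r = point_five & (zeros | ones_digit), and the shifted value is incremented by r. A small plain-Scala model of that equation (illustrative helper, not part of the sources), with a couple of worked values:

def roundingShift(self: Int, u: Int): Int = {
  if (u == 0) self
  else {
    val pointFive = (self >> (u - 1)) & 1                                      // bit just below the cut
    val zeros     = if (u > 1 && (self & ((1 << (u - 1)) - 1)) != 0) 1 else 0  // any discarded lower bits set
    val onesDigit = (self >> u) & 1                                            // LSB of the shifted result
    (self >> u) + (pointFive & (zeros | onesDigit))
  }
}
// roundingShift(6, 2) == 2   (1.5 rounds up)
// roundingShift(10, 2) == 2  (2.5: the tie keeps the even result)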
module MacUnit_158( // @[PE.scala:14:7] input clock, // @[PE.scala:14:7] input reset, // @[PE.scala:14:7] input [7:0] io_in_a, // @[PE.scala:16:14] input [7:0] io_in_b, // @[PE.scala:16:14] input [31:0] io_in_c, // @[PE.scala:16:14] output [19:0] io_out_d // @[PE.scala:16:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7] wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7] wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7] wire [19:0] io_out_d_0; // @[PE.scala:14:7] wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7] wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7] wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54] wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54] assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12] assign io_out_d = io_out_d_0; // @[PE.scala:14:7] endmodule
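MacUnit_158 above is the Verilog elaborated from the MacUnit module in PE.scala with 8-bit signed inputs, a 32-bit signed addend and a 20-bit output; it implements io.out_d := io.in_c.mac(io.in_a, io.in_b), i.e. m1 * m2 + self from the SInt Arithmetic instance, with the sum truncated to out_d's width. A small plain-Scala model of the same bit-level arithmetic (illustrative name, not part of the sources):

def macUnit158Model(inA: Byte, inB: Byte, inC: Int): Int = {
  val product = inA.toInt * inB.toInt   // signed 8x8 multiply, fits in 16 bits
  val sum     = product + inC           // 32-bit two's-complement sum, like the RTL's [31:0] slice
  sum & 0xFFFFF                         // low 20 bits driven onto io_out_d
}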
Generate the Verilog code corresponding to the following Chisel files. File Buffer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.BufferParams class TLBufferNode ( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit valName: ValName) extends TLAdapterNode( clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) }, managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) } ) { override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}" override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none) } class TLBuffer( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters) extends LazyModule { def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace) def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde) def this()(implicit p: Parameters) = this(BufferParams.default) val node = new TLBufferNode(a, b, c, d, e) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def headBundle = node.out.head._2.bundle override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.a <> a(in .a) in .d <> d(out.d) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> b(out.b) out.c <> c(in .c) out.e <> e(in .e) } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLBuffer { def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default) def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde) def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace) def apply( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters): TLNode = { val buffer = LazyModule(new TLBuffer(a, b, c, d, e)) buffer.node } def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = { val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) } name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } } buffers.map(_.node) } def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = { chain(depth, name) .reduceLeftOption(_ :*=* _) .getOrElse(TLNameNode("no_buffer")) } } File Nodes.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. 
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } }
module TLBuffer_a32d64s6k3z3c( // @[Buffer.scala:40:9] input clock, // @[Buffer.scala:40:9] input reset, // @[Buffer.scala:40:9] output auto_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [5:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_b_ready, // @[LazyModuleImp.scala:107:25] output auto_in_b_valid, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_b_bits_param, // @[LazyModuleImp.scala:107:25] output [31:0] auto_in_b_bits_address, // @[LazyModuleImp.scala:107:25] output auto_in_c_ready, // @[LazyModuleImp.scala:107:25] input auto_in_c_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_c_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_c_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_c_bits_size, // @[LazyModuleImp.scala:107:25] input [5:0] auto_in_c_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_c_bits_address, // @[LazyModuleImp.scala:107:25] input [63:0] auto_in_c_bits_data, // @[LazyModuleImp.scala:107:25] input auto_in_c_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [5:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_e_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_e_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [5:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_out_b_ready, // @[LazyModuleImp.scala:107:25] input auto_out_b_valid, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_b_bits_param, // @[LazyModuleImp.scala:107:25] input [31:0] auto_out_b_bits_address, // @[LazyModuleImp.scala:107:25] input auto_out_c_ready, // @[LazyModuleImp.scala:107:25] output auto_out_c_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_c_bits_param, // @[LazyModuleImp.scala:107:25] 
output [2:0] auto_out_c_bits_size, // @[LazyModuleImp.scala:107:25] output [5:0] auto_out_c_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_c_bits_address, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_c_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_c_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [5:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_out_e_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_e_bits_sink // @[LazyModuleImp.scala:107:25] ); wire _nodeIn_d_q_io_deq_valid; // @[Decoupled.scala:362:21] wire [2:0] _nodeIn_d_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21] wire [1:0] _nodeIn_d_q_io_deq_bits_param; // @[Decoupled.scala:362:21] wire [2:0] _nodeIn_d_q_io_deq_bits_size; // @[Decoupled.scala:362:21] wire [5:0] _nodeIn_d_q_io_deq_bits_source; // @[Decoupled.scala:362:21] wire [2:0] _nodeIn_d_q_io_deq_bits_sink; // @[Decoupled.scala:362:21] wire _nodeIn_d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21] wire _nodeIn_d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21] wire _nodeOut_a_q_io_enq_ready; // @[Decoupled.scala:362:21] TLMonitor_35 monitor ( // @[Nodes.scala:27:25] .clock (clock), .reset (reset), .io_in_a_ready (_nodeOut_a_q_io_enq_ready), // @[Decoupled.scala:362:21] .io_in_a_valid (auto_in_a_valid), .io_in_a_bits_opcode (auto_in_a_bits_opcode), .io_in_a_bits_param (auto_in_a_bits_param), .io_in_a_bits_size (auto_in_a_bits_size), .io_in_a_bits_source (auto_in_a_bits_source), .io_in_a_bits_address (auto_in_a_bits_address), .io_in_a_bits_mask (auto_in_a_bits_mask), .io_in_a_bits_corrupt (auto_in_a_bits_corrupt), .io_in_b_ready (auto_in_b_ready), .io_in_b_valid (auto_out_b_valid), .io_in_b_bits_param (auto_out_b_bits_param), .io_in_b_bits_address (auto_out_b_bits_address), .io_in_c_ready (auto_out_c_ready), .io_in_c_valid (auto_in_c_valid), .io_in_c_bits_opcode (auto_in_c_bits_opcode), .io_in_c_bits_param (auto_in_c_bits_param), .io_in_c_bits_size (auto_in_c_bits_size), .io_in_c_bits_source (auto_in_c_bits_source), .io_in_c_bits_address (auto_in_c_bits_address), .io_in_c_bits_corrupt (auto_in_c_bits_corrupt), .io_in_d_ready (auto_in_d_ready), .io_in_d_valid (_nodeIn_d_q_io_deq_valid), // @[Decoupled.scala:362:21] .io_in_d_bits_opcode (_nodeIn_d_q_io_deq_bits_opcode), // @[Decoupled.scala:362:21] .io_in_d_bits_param (_nodeIn_d_q_io_deq_bits_param), // @[Decoupled.scala:362:21] .io_in_d_bits_size (_nodeIn_d_q_io_deq_bits_size), // @[Decoupled.scala:362:21] .io_in_d_bits_source (_nodeIn_d_q_io_deq_bits_source), // @[Decoupled.scala:362:21] .io_in_d_bits_sink (_nodeIn_d_q_io_deq_bits_sink), // @[Decoupled.scala:362:21] .io_in_d_bits_denied (_nodeIn_d_q_io_deq_bits_denied), // @[Decoupled.scala:362:21] .io_in_d_bits_corrupt (_nodeIn_d_q_io_deq_bits_corrupt), // @[Decoupled.scala:362:21] .io_in_e_valid (auto_in_e_valid), .io_in_e_bits_sink (auto_in_e_bits_sink) ); // @[Nodes.scala:27:25] 
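  // The single-entry queues instantiated below register-slice the A (request)
  // and D (response) channels; the B, C and E channels bypass the queues and
  // are wired through combinationally by the assign statements at the end of
  // this module.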
Queue1_TLBundleA_a32d64s6k3z3c nodeOut_a_q ( // @[Decoupled.scala:362:21] .clock (clock), .reset (reset), .io_enq_ready (_nodeOut_a_q_io_enq_ready), .io_enq_valid (auto_in_a_valid), .io_enq_bits_opcode (auto_in_a_bits_opcode), .io_enq_bits_param (auto_in_a_bits_param), .io_enq_bits_size (auto_in_a_bits_size), .io_enq_bits_source (auto_in_a_bits_source), .io_enq_bits_address (auto_in_a_bits_address), .io_enq_bits_mask (auto_in_a_bits_mask), .io_enq_bits_data (auto_in_a_bits_data), .io_enq_bits_corrupt (auto_in_a_bits_corrupt), .io_deq_ready (auto_out_a_ready), .io_deq_valid (auto_out_a_valid), .io_deq_bits_opcode (auto_out_a_bits_opcode), .io_deq_bits_param (auto_out_a_bits_param), .io_deq_bits_size (auto_out_a_bits_size), .io_deq_bits_source (auto_out_a_bits_source), .io_deq_bits_address (auto_out_a_bits_address), .io_deq_bits_mask (auto_out_a_bits_mask), .io_deq_bits_data (auto_out_a_bits_data), .io_deq_bits_corrupt (auto_out_a_bits_corrupt) ); // @[Decoupled.scala:362:21] Queue1_TLBundleD_a32d64s6k3z3c nodeIn_d_q ( // @[Decoupled.scala:362:21] .clock (clock), .reset (reset), .io_enq_ready (auto_out_d_ready), .io_enq_valid (auto_out_d_valid), .io_enq_bits_opcode (auto_out_d_bits_opcode), .io_enq_bits_param (auto_out_d_bits_param), .io_enq_bits_size (auto_out_d_bits_size), .io_enq_bits_source (auto_out_d_bits_source), .io_enq_bits_sink (auto_out_d_bits_sink), .io_enq_bits_denied (auto_out_d_bits_denied), .io_enq_bits_data (auto_out_d_bits_data), .io_enq_bits_corrupt (auto_out_d_bits_corrupt), .io_deq_ready (auto_in_d_ready), .io_deq_valid (_nodeIn_d_q_io_deq_valid), .io_deq_bits_opcode (_nodeIn_d_q_io_deq_bits_opcode), .io_deq_bits_param (_nodeIn_d_q_io_deq_bits_param), .io_deq_bits_size (_nodeIn_d_q_io_deq_bits_size), .io_deq_bits_source (_nodeIn_d_q_io_deq_bits_source), .io_deq_bits_sink (_nodeIn_d_q_io_deq_bits_sink), .io_deq_bits_denied (_nodeIn_d_q_io_deq_bits_denied), .io_deq_bits_data (auto_in_d_bits_data), .io_deq_bits_corrupt (_nodeIn_d_q_io_deq_bits_corrupt) ); // @[Decoupled.scala:362:21] assign auto_in_a_ready = _nodeOut_a_q_io_enq_ready; // @[Decoupled.scala:362:21] assign auto_in_b_valid = auto_out_b_valid; // @[Buffer.scala:40:9] assign auto_in_b_bits_param = auto_out_b_bits_param; // @[Buffer.scala:40:9] assign auto_in_b_bits_address = auto_out_b_bits_address; // @[Buffer.scala:40:9] assign auto_in_c_ready = auto_out_c_ready; // @[Buffer.scala:40:9] assign auto_in_d_valid = _nodeIn_d_q_io_deq_valid; // @[Decoupled.scala:362:21] assign auto_in_d_bits_opcode = _nodeIn_d_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21] assign auto_in_d_bits_param = _nodeIn_d_q_io_deq_bits_param; // @[Decoupled.scala:362:21] assign auto_in_d_bits_size = _nodeIn_d_q_io_deq_bits_size; // @[Decoupled.scala:362:21] assign auto_in_d_bits_source = _nodeIn_d_q_io_deq_bits_source; // @[Decoupled.scala:362:21] assign auto_in_d_bits_sink = _nodeIn_d_q_io_deq_bits_sink; // @[Decoupled.scala:362:21] assign auto_in_d_bits_denied = _nodeIn_d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21] assign auto_in_d_bits_corrupt = _nodeIn_d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21] assign auto_out_b_ready = auto_in_b_ready; // @[Buffer.scala:40:9] assign auto_out_c_valid = auto_in_c_valid; // @[Buffer.scala:40:9] assign auto_out_c_bits_opcode = auto_in_c_bits_opcode; // @[Buffer.scala:40:9] assign auto_out_c_bits_param = auto_in_c_bits_param; // @[Buffer.scala:40:9] assign auto_out_c_bits_size = auto_in_c_bits_size; // @[Buffer.scala:40:9] assign auto_out_c_bits_source = auto_in_c_bits_source; // 
@[Buffer.scala:40:9] assign auto_out_c_bits_address = auto_in_c_bits_address; // @[Buffer.scala:40:9] assign auto_out_c_bits_data = auto_in_c_bits_data; // @[Buffer.scala:40:9] assign auto_out_c_bits_corrupt = auto_in_c_bits_corrupt; // @[Buffer.scala:40:9] assign auto_out_e_valid = auto_in_e_valid; // @[Buffer.scala:40:9] assign auto_out_e_bits_sink = auto_in_e_bits_sink; // @[Buffer.scala:40:9] endmodule
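The module above is the elaborated form of a TileLink buffer stage: single-entry queues on the A and D channels, combinational pass-through for B, C and E, plus a TLMonitor instance (its source appears in the next section) observing the edge. For context, such an adapter is normally interposed on a TileLink edge inside a parent LazyModule, roughly as in the hedged sketch below; `clientSide` and `managerSide` are illustrative placeholders, not names from the sources here.

// Hedged composition sketch; lives inside some parent LazyModule's constructor.
managerSide.node := TLBuffer() := clientSide.node   // interpose a buffer stage on this edge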
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
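    // Each A-channel opcode is checked in its own `when` block below: the
    // message must be legal under the diplomatic parameters (the master emits
    // it and the slave supports it), carry a valid source ID, be aligned to
    // its size, and have legal param/mask/corrupt fields for that opcode.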
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
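    // inflight_opcodes / inflight_sizes are updated in lockstep with `inflight`:
    // per-source opcode and size are recorded when an A request fires and cleared
    // when the matching D response fires, so the checks above can compare each
    // response's opcode and size against the original request.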
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
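      // Barrel rotation: pad the shift amount to log2(width) bits, then for each
      // bit i of the amount conditionally rotate by 2^i; folding over all bits
      // composes the stages into an arbitrary dynamic rotate, one Mux layer per stage.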
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.diplomacy.{ AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry, IdRange, RegionType, TransferSizes } import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions} import freechips.rocketchip.util.{ AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase, CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct } import scala.math.max //These transfer sizes describe requests issued from masters on the A channel that will be responded by slaves on the D channel case class TLMasterToSlaveTransferSizes( // Supports both Acquire+Release of the following two sizes: acquireT: TransferSizes = TransferSizes.none, acquireB: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none) extends TLCommonTransferSizes { def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .intersect(rhs.acquireT), acquireB = acquireB .intersect(rhs.acquireB), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint)) def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .mincover(rhs.acquireT), acquireB = acquireB .mincover(rhs.acquireB), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint)) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(acquireT, "T"), str(acquireB, "B"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""acquireT = ${acquireT} |acquireB = ${acquireB} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLMasterToSlaveTransferSizes { def unknownEmits = TLMasterToSlaveTransferSizes( acquireT = TransferSizes(1, 
4096), acquireB = TransferSizes(1, 4096), arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096)) def unknownSupports = TLMasterToSlaveTransferSizes() } //These transfer sizes describe requests issued from slaves on the B channel that will be responded by masters on the C channel case class TLSlaveToMasterTransferSizes( probe: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none ) extends TLCommonTransferSizes { def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .intersect(rhs.probe), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint) ) def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .mincover(rhs.probe), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint) ) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(probe, "P"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""probe = ${probe} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLSlaveToMasterTransferSizes { def unknownEmits = TLSlaveToMasterTransferSizes( arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096), probe = TransferSizes(1, 4096)) def unknownSupports = TLSlaveToMasterTransferSizes() } trait TLCommonTransferSizes { def arithmetic: TransferSizes def logical: TransferSizes def get: TransferSizes def putFull: TransferSizes def putPartial: TransferSizes def hint: TransferSizes } class TLSlaveParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], setName: Option[String], val address: Seq[AddressSet], val regionType: RegionType.T, val executable: Boolean, val fifoId: Option[Int], val supports: TLMasterToSlaveTransferSizes, val emits: TLSlaveToMasterTransferSizes, // By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation) val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData // If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order // Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck // ReleaseAck may NEVER be denied 
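  // Diplomatic description of a single slave (manager) device: the address sets it
  // decodes, its region semantics, and the transfer sizes it supports and emits.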
extends SimpleProduct { def sortedAddress = address.sorted override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters] override def productPrefix = "TLSlaveParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 11 def productElement(n: Int): Any = n match { case 0 => name case 1 => address case 2 => resources case 3 => regionType case 4 => executable case 5 => fifoId case 6 => supports case 7 => emits case 8 => alwaysGrantsT case 9 => mayDenyGet case 10 => mayDenyPut case _ => throw new IndexOutOfBoundsException(n.toString) } def supportsAcquireT: TransferSizes = supports.acquireT def supportsAcquireB: TransferSizes = supports.acquireB def supportsArithmetic: TransferSizes = supports.arithmetic def supportsLogical: TransferSizes = supports.logical def supportsGet: TransferSizes = supports.get def supportsPutFull: TransferSizes = supports.putFull def supportsPutPartial: TransferSizes = supports.putPartial def supportsHint: TransferSizes = supports.hint require (!address.isEmpty, "Address cannot be empty") address.foreach { a => require (a.finite, "Address must be finite") } address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)") require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)") require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)") require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)") require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)") require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)") require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT") // Make sure that the regionType agrees with the capabilities require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected") val maxTransfer = List( // Largest supported transfer of all types supportsAcquireT.max, supportsAcquireB.max, supportsArithmetic.max, supportsLogical.max, supportsGet.max, supportsPutFull.max, supportsPutPartial.max).max val maxAddress = address.map(_.max).max val minAlignment = address.map(_.alignment).min // The device had better not support a transfer larger than its alignment require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)") def toResource: ResourceAddress = { ResourceAddress(address, ResourcePermissions( r = supportsAcquireB || supportsGet, w = supportsAcquireT || supportsPutFull, x = executable, c = supportsAcquireB, a = supportsArithmetic && supportsLogical)) } def findTreeViolation() = nodePath.find { case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false case _: SinkNode[_, _, _, _, _] => false case node => node.inputs.size != 1 } def isTree = findTreeViolation() == None def infoString = { s"""Slave Name = ${name} |Slave Address = ${address} |supports = ${supports.infoString} | 
|""".stripMargin } def v1copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { new TLSlaveParameters( setName = setName, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = emits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: Option[String] = setName, address: Seq[AddressSet] = address, regionType: RegionType.T = regionType, executable: Boolean = executable, fifoId: Option[Int] = fifoId, supports: TLMasterToSlaveTransferSizes = supports, emits: TLSlaveToMasterTransferSizes = emits, alwaysGrantsT: Boolean = alwaysGrantsT, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } @deprecated("Use v1copy instead of copy","") def copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { v1copy( address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supportsAcquireT = supportsAcquireT, supportsAcquireB = supportsAcquireB, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } } object TLSlaveParameters { def v1( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), 
supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = { new TLSlaveParameters( setName = None, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLSlaveToMasterTransferSizes.unknownEmits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2( address: Seq[AddressSet], nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Seq(), name: Option[String] = None, regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, fifoId: Option[Int] = None, supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports, emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits, alwaysGrantsT: Boolean = false, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } } object TLManagerParameters { @deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","") def apply( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = TLSlaveParameters.v1( address, resources, regionType, executable, nodePath, supportsAcquireT, supportsAcquireB, supportsArithmetic, supportsLogical, supportsGet, supportsPutFull, supportsPutPartial, supportsHint, mayDenyGet, mayDenyPut, alwaysGrantsT, fifoId, ) } case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int]) { def members = Seq(a, b, c, d) members.collect { case Some(beatBytes) => require (isPow2(beatBytes), "Data channel width must be a power of 2") } } object TLChannelBeatBytes{ def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes( Some(beatBytes), Some(beatBytes), Some(beatBytes), Some(beatBytes)) def apply(): TLChannelBeatBytes = TLChannelBeatBytes( None, None, None, None) } class TLSlavePortParameters private( val slaves: Seq[TLSlaveParameters], 
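  // Beat widths for each channel of this port; the beatBytes accessor below
  // requires them to be defined and uniform across A/B/C/D.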
val channelBytes: TLChannelBeatBytes, val endSinkId: Int, val minLatency: Int, val responseFields: Seq[BundleFieldBase], val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct { def sortedSlaves = slaves.sortBy(_.sortedAddress.head) override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters] override def productPrefix = "TLSlavePortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => slaves case 1 => channelBytes case 2 => endSinkId case 3 => minLatency case 4 => responseFields case 5 => requestKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!slaves.isEmpty, "Slave ports must have slaves") require (endSinkId >= 0, "Sink ids cannot be negative") require (minLatency >= 0, "Minimum required latency cannot be negative") // Using this API implies you cannot handle mixed-width busses def beatBytes = { channelBytes.members.foreach { width => require (width.isDefined && width == channelBytes.a) } channelBytes.a.get } // TODO this should be deprecated def managers = slaves def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = { val relevant = slaves.filter(m => policy(m)) relevant.foreach { m => require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ") } } // Bounds on required sizes def maxAddress = slaves.map(_.maxAddress).max def maxTransfer = slaves.map(_.maxTransfer).max def mayDenyGet = slaves.exists(_.mayDenyGet) def mayDenyPut = slaves.exists(_.mayDenyPut) // Diplomatically determined operation sizes emitted by all outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _) // Operation Emitted by at least one outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _) val allSupportAcquireT = allSupportClaims.acquireT val allSupportAcquireB = allSupportClaims.acquireB val allSupportArithmetic = allSupportClaims.arithmetic val allSupportLogical = allSupportClaims.logical val allSupportGet = allSupportClaims.get val allSupportPutFull = allSupportClaims.putFull val allSupportPutPartial = allSupportClaims.putPartial val allSupportHint = allSupportClaims.hint // Operation supported by at least one outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _) val anySupportAcquireT = !anySupportClaims.acquireT.none val anySupportAcquireB = !anySupportClaims.acquireB.none val anySupportArithmetic = !anySupportClaims.arithmetic.none val anySupportLogical = !anySupportClaims.logical.none val anySupportGet = !anySupportClaims.get.none val anySupportPutFull = !anySupportClaims.putFull.none val anySupportPutPartial = !anySupportClaims.putPartial.none val anySupportHint = !anySupportClaims.hint.none // Supporting Acquire means being routable for GrantAck require ((endSinkId == 0) == !anySupportAcquireB) // These return Option[TLSlaveParameters] for your convenience def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address))) // The safe version will check the 
entire address def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _))) // The fast version assumes the address is valid (you probably want fastProperty instead of this function) def findFast(address: UInt) = { val routingMask = AddressDecoder(slaves.map(_.address)) VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _))) } // Compute the simplest AddressSets that decide a key def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = { val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) => k -> vs.flatMap(_._2) } val reductionMask = AddressDecoder(groups.map(_._2)) groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) } } // Select a property def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D = Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) }) // Note: returns the actual fifoId + 1 or 0 if None def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U) def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B) // Does this Port manage this ID/address? def containsSafe(address: UInt) = findSafe(address).reduce(_ || _) private def addressHelper( // setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity safe: Boolean, // member filters out the sizes being checked based on the opcode being emitted or supported member: TLSlaveParameters => TransferSizes, address: UInt, lgSize: UInt, // range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity range: Option[TransferSizes]): Bool = { // trim reduces circuit complexity by intersecting checked sizes with the range argument def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x) // groupBy returns an unordered map, convert back to Seq and sort the result for determinism // groupByIntoSeq is turning slaves into trimmed membership sizes // We are grouping all the slaves by their transfer size where // if they support the trimmed size then // member is the type of transfer that you are looking for (What you are trying to filter on) // When you consider membership, you are trimming the sizes to only the ones that you care about // you are filtering the slaves based on both whether they support a particular opcode and the size // Grouping the slaves based on the actual transfer size range they support // intersecting the range and checking their membership // FOR SUPPORTCASES instead of returning the list of slaves, // you are returning a map from transfer size to the set of // address sets that are supported for that transfer size // find all the slaves that support a certain type of operation and then group their addresses by the supported size // for every size there could be multiple address ranges // safety is a trade off between checking between all possible addresses vs only the addresses // that are known to have supported sizes // the trade off is 'checking all addresses is a more expensive circuit but will always give you // the right answer even if you give it an illegal address' // the not safe version is a cheaper circuit but if you give it an illegal address then it might produce the wrong answer // fast presumes address legality // This 
groupByIntoSeq deterministically groups all address sets for which a given `member` transfer size applies. // In the resulting Map of cases, the keys are transfer sizes and the values are all address sets which emit or support that size. val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) => k -> vs.flatMap(_.address) } // safe produces a circuit that compares against all possible addresses, // whereas fast presumes that the address is legal but uses an efficient address decoder val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2)) // Simplified creates the most concise possible representation of each cases' address sets based on the mask. val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) } simplified.map { case (s, a) => // s is a size, you are checking for this size either the size of the operation is in s // We return an or-reduction of all the cases, checking whether any contains both the dynamic size and dynamic address on the wire. ((Some(s) == range).B || s.containsLg(lgSize)) && a.map(_.contains(address)).reduce(_||_) }.foldLeft(false.B)(_||_) } def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range) def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range) def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range) def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range) def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range) def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range) def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range) def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range) def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range) def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range) def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range) def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range) def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range) def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range) def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, 
_.supports.putPartial, address, lgSize, range) def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range) def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range) def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range) def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range) def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range) def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range) def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range) def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range) def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption def isTree = !slaves.exists(!_.isTree) def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString def v1copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = managers, channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } def v2copy( slaves: Seq[TLSlaveParameters] = slaves, channelBytes: TLChannelBeatBytes = channelBytes, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = slaves, channelBytes = channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } @deprecated("Use v1copy instead of copy","") def copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { v1copy( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } object TLSlavePortParameters { def v1( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { new TLSlavePortParameters( slaves = managers, channelBytes = TLChannelBeatBytes(beatBytes), endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } } object TLManagerPortParameters { @deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","") def apply( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { 
TLSlavePortParameters.v1( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } class TLMasterParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], val name: String, val visibility: Seq[AddressSet], val unusedRegionTypes: Set[RegionType.T], val executesOnly: Boolean, val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C. val supports: TLSlaveToMasterTransferSizes, val emits: TLMasterToSlaveTransferSizes, val neverReleasesData: Boolean, val sourceId: IdRange) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters] override def productPrefix = "TLMasterParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 10 def productElement(n: Int): Any = n match { case 0 => name case 1 => sourceId case 2 => resources case 3 => visibility case 4 => unusedRegionTypes case 5 => executesOnly case 6 => requestFifo case 7 => supports case 8 => emits case 9 => neverReleasesData case _ => throw new IndexOutOfBoundsException(n.toString) } require (!sourceId.isEmpty) require (!visibility.isEmpty) require (supports.putFull.contains(supports.putPartial)) // We only support these operations if we support Probe (ie: we're a cache) require (supports.probe.contains(supports.arithmetic)) require (supports.probe.contains(supports.logical)) require (supports.probe.contains(supports.get)) require (supports.probe.contains(supports.putFull)) require (supports.probe.contains(supports.putPartial)) require (supports.probe.contains(supports.hint)) visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } val maxTransfer = List( supports.probe.max, supports.arithmetic.max, supports.logical.max, supports.get.max, supports.putFull.max, supports.putPartial.max).max def infoString = { s"""Master Name = ${name} |visibility = ${visibility} |emits = ${emits.infoString} |sourceId = ${sourceId} | |""".stripMargin } def v1copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { new TLMasterParameters( nodePath = nodePath, resources = this.resources, name = name, visibility = visibility, unusedRegionTypes = this.unusedRegionTypes, executesOnly = this.executesOnly, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = this.emits, neverReleasesData = this.neverReleasesData, sourceId = sourceId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: String = name, visibility: Seq[AddressSet] = visibility, unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes, executesOnly: Boolean = executesOnly, requestFifo: Boolean = requestFifo, supports: TLSlaveToMasterTransferSizes = supports, emits: TLMasterToSlaveTransferSizes = emits, neverReleasesData: Boolean = neverReleasesData, sourceId: IdRange = sourceId) = { new 
TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } @deprecated("Use v1copy instead of copy","") def copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { v1copy( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } object TLMasterParameters { def v1( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { new TLMasterParameters( nodePath = nodePath, resources = Nil, name = name, visibility = visibility, unusedRegionTypes = Set(), executesOnly = false, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData = false, sourceId = sourceId) } def v2( nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Nil, name: String, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), unusedRegionTypes: Set[RegionType.T] = Set(), executesOnly: Boolean = false, requestFifo: Boolean = false, supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports, emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData: Boolean = false, sourceId: IdRange = IdRange(0,1)) = { new TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } } object TLClientParameters { @deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","") def apply( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet.everything), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: 
TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { TLMasterParameters.v1( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } class TLMasterPortParameters private( val masters: Seq[TLMasterParameters], val channelBytes: TLChannelBeatBytes, val minLatency: Int, val echoFields: Seq[BundleFieldBase], val requestFields: Seq[BundleFieldBase], val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters] override def productPrefix = "TLMasterPortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => masters case 1 => channelBytes case 2 => minLatency case 3 => echoFields case 4 => requestFields case 5 => responseKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!masters.isEmpty) require (minLatency >= 0) def clients = masters // Require disjoint ranges for Ids IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) => require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}") } // Bounds on required sizes def endSourceId = masters.map(_.sourceId.end).max def maxTransfer = masters.map(_.maxTransfer).max // The unused sources < endSourceId def unusedSources: Seq[Int] = { val usedSources = masters.map(_.sourceId).sortBy(_.start) ((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) => end until start } } // Diplomatically determined operation sizes emitted by all inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = masters.map(_.emits).reduce( _ intersect _) // Diplomatically determined operation sizes Emitted by at least one inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all inward Masters // as opposed to supports* which generate circuitry to check which specific addresses val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _) val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _) val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _) val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _) val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _) val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _) val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _) // Diplomatically determined operation sizes supported by at least one master // as opposed to supports* which generate circuitry to check which specific addresses val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _) val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _) val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _) val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _) val anySupportPutFull = 
masters.map(!_.supports.putFull.none) .reduce(_ || _) val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _) val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _) // These return Option[TLMasterParameters] for your convenience def find(id: Int) = masters.find(_.sourceId.contains(id)) // Synthesizable lookup methods def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id))) def contains(id: UInt) = find(id).reduce(_ || _) def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B)) // Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = { val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _) // this if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version; // the case where there is only one group. if (allSame) member(masters(0)).containsLg(lgSize) else { // Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize Mux1H(find(id), masters.map(member(_).containsLg(lgSize))) } } // Check for support of a given operation at a specific id val supportsProbe = sourceIdHelper(_.supports.probe) _ val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _ val supportsLogical = sourceIdHelper(_.supports.logical) _ val supportsGet = sourceIdHelper(_.supports.get) _ val supportsPutFull = sourceIdHelper(_.supports.putFull) _ val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _ val supportsHint = sourceIdHelper(_.supports.hint) _ // TODO: Merge sourceIdHelper2 with sourceIdHelper private def sourceIdHelper2( member: TLMasterParameters => TransferSizes, sourceId: UInt, lgSize: UInt): Bool = { // Because sourceIds are uniquely owned by each master, we use them to group the // cases that have to be checked. 
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) => k -> vs.map(_.sourceId) } emitCases.map { case (s, a) => (s.containsLg(lgSize)) && a.map(_.contains(sourceId)).reduce(_||_) }.foldLeft(false.B)(_||_) } // Check for emit of a given operation at a specific id def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize) def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize) def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize) def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize) def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize) def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize) def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize) def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize) def infoString = masters.map(_.infoString).mkString def v1copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = clients, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2copy( masters: Seq[TLMasterParameters] = masters, channelBytes: TLChannelBeatBytes = channelBytes, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } @deprecated("Use v1copy instead of copy","") def copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { v1copy( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLClientPortParameters { @deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","") def apply( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { TLMasterPortParameters.v1( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLMasterPortParameters { def v1( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = clients, channelBytes = TLChannelBeatBytes(), minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2( masters: Seq[TLMasterParameters], channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(), minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, 
minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } } case class TLBundleParameters( addressBits: Int, dataBits: Int, sourceBits: Int, sinkBits: Int, sizeBits: Int, echoFields: Seq[BundleFieldBase], requestFields: Seq[BundleFieldBase], responseFields: Seq[BundleFieldBase], hasBCE: Boolean) { // Chisel has issues with 0-width wires require (addressBits >= 1) require (dataBits >= 8) require (sourceBits >= 1) require (sinkBits >= 1) require (sizeBits >= 1) require (isPow2(dataBits)) echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") } val addrLoBits = log2Up(dataBits/8) // Used to uniquify bus IP names def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u") def union(x: TLBundleParameters) = TLBundleParameters( max(addressBits, x.addressBits), max(dataBits, x.dataBits), max(sourceBits, x.sourceBits), max(sinkBits, x.sinkBits), max(sizeBits, x.sizeBits), echoFields = BundleField.union(echoFields ++ x.echoFields), requestFields = BundleField.union(requestFields ++ x.requestFields), responseFields = BundleField.union(responseFields ++ x.responseFields), hasBCE || x.hasBCE) } object TLBundleParameters { val emptyBundleParams = TLBundleParameters( addressBits = 1, dataBits = 8, sourceBits = 1, sinkBits = 1, sizeBits = 1, echoFields = Nil, requestFields = Nil, responseFields = Nil, hasBCE = false) def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y)) def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) = new TLBundleParameters( addressBits = log2Up(slave.maxAddress + 1), dataBits = slave.beatBytes * 8, sourceBits = log2Up(master.endSourceId), sinkBits = log2Up(slave.endSinkId), sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1), echoFields = master.echoFields, requestFields = BundleField.accept(master.requestFields, slave.requestKeys), responseFields = BundleField.accept(slave.responseFields, master.responseKeys), hasBCE = master.anySupportProbe && slave.anySupportAcquireB) } case class TLEdgeParameters( master: TLMasterPortParameters, slave: TLSlavePortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { // legacy names: def manager = slave def client = master val maxTransfer = max(master.maxTransfer, slave.maxTransfer) val maxLgSize = log2Ceil(maxTransfer) // Sanity check the link... 
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})") def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims) val bundle = TLBundleParameters(master, slave) def formatEdge = master.infoString + "\n" + slave.infoString } case class TLCreditedDelay( a: CreditedDelay, b: CreditedDelay, c: CreditedDelay, d: CreditedDelay, e: CreditedDelay) { def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay( a = a + that.a, b = b + that.b, c = c + that.c, d = d + that.d, e = e + that.e) override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})" } object TLCreditedDelay { def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay) } case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString} case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val delay = client.delay + manager.delay val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters) case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base)) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } // To be unified, devices must agree on all of these terms case class ManagerUnificationKey( resources: Seq[Resource], regionType: RegionType.T, executable: Boolean, supportsAcquireT: TransferSizes, supportsAcquireB: TransferSizes, supportsArithmetic: TransferSizes, supportsLogical: TransferSizes, supportsGet: TransferSizes, supportsPutFull: TransferSizes, supportsPutPartial: TransferSizes, supportsHint: TransferSizes) object ManagerUnificationKey { def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey( resources = x.resources, regionType = x.regionType, executable = x.executable, supportsAcquireT = x.supportsAcquireT, supportsAcquireB = x.supportsAcquireB, supportsArithmetic = x.supportsArithmetic, supportsLogical = x.supportsLogical, supportsGet = x.supportsGet, supportsPutFull = x.supportsPutFull, supportsPutPartial = x.supportsPutPartial, supportsHint = x.supportsHint) } object 
ManagerUnification { def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = { slaves.groupBy(ManagerUnificationKey.apply).values.map { seq => val agree = seq.forall(_.fifoId == seq.head.fifoId) seq(0).v1copy( address = AddressSet.unify(seq.flatMap(_.address)), fifoId = if (agree) seq(0).fifoId else None) }.toList } } case class TLBufferParams( a: BufferParams = BufferParams.none, b: BufferParams = BufferParams.none, c: BufferParams = BufferParams.none, d: BufferParams = BufferParams.none, e: BufferParams = BufferParams.none ) extends DirectedBuffers[TLBufferParams] { def copyIn(x: BufferParams) = this.copy(b = x, d = x) def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x) def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x) } /** Pretty printing of TL source id maps */ class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] { private val tlDigits = String.valueOf(tl.endSourceId-1).length() protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s" private val sorted = tl.masters.sortBy(_.sourceId) val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c => TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo) } } case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean) extends IdMapEntry { val from = tlId val to = tlId val maxTransactionsInFlight = Some(tlId.size) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? 
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => 
a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? 
def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size 
:= lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = 
Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address 
:= toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def 
Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := 
mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
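// Aside (not one of the files above): a minimal plain-Scala sketch of the safe-vs-fast
// address-check trade-off described in the addressHelper comments in Parameters.scala.
// `SimpleAddressSet` is a hypothetical stand-in for AddressSet: it contains `addr` iff
// (addr & ~mask) == base, and `widen(extra)` marks extra bits as don't-care. The "fast"
// check widens each set by the bits that never distinguish the candidates, so it is a
// cheaper comparison but is only trustworthy when the address is already known to be legal.
case class SimpleAddressSet(base: BigInt, mask: BigInt) {
  def contains(addr: BigInt): Boolean = (addr & ~mask) == base
  def widen(extra: BigInt): SimpleAddressSet = SimpleAddressSet(base & ~extra, mask | extra)
}

object SafeVsFastSketch extends App {
  val slaveA = SimpleAddressSet(0x0000, 0x0fff) // 4 KiB region at 0x0000
  val slaveB = SimpleAddressSet(0x1000, 0x0fff) // 4 KiB region at 0x1000
  // Only address bit 12 ever distinguishes the two slaves, so all other bits can be widened away.
  val dontCare = ~BigInt(0x1000)
  def findSafe(addr: BigInt) = Seq(slaveA, slaveB).map(_.contains(addr))
  def findFast(addr: BigInt) = Seq(slaveA, slaveB).map(_.widen(dontCare).contains(addr))
  println(findSafe(0x0800)) // List(true, false)
  println(findFast(0x0800)) // List(true, false)  -- agrees for a legal address
  println(findSafe(0x2800)) // List(false, false) -- illegal address correctly rejected
  println(findFast(0x2800)) // List(true, false)  -- fast check presumes legality
}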
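// Aside (not one of the files above): a minimal chisel3 sketch of the beat-tracking pattern
// used by firstlastHelper in Edges.scala. `BeatTracker` is a hypothetical standalone module:
// `beats1` is the number of beats in the current message minus one (assumed stable for the
// duration of a burst, as TileLink fields are), the counter reloads on the first accepted
// beat, and `first`/`last` frame each burst. An illustrative reduction, not TLEdge itself.
import chisel3._
import chisel3.util._

class BeatTracker(maxBeats: Int) extends Module {
  require(maxBeats >= 1 && isPow2(maxBeats))
  val io = IO(new Bundle {
    val fire   = Input(Bool())                    // a beat is accepted this cycle
    val beats1 = Input(UInt(log2Up(maxBeats).W))  // beats in the message, minus one
    val first  = Output(Bool())
    val last   = Output(Bool())
  })
  val counter  = RegInit(0.U(log2Up(maxBeats).W))
  val counter1 = counter - 1.U
  io.first := counter === 0.U
  io.last  := counter === 1.U || io.beats1 === 0.U
  when (io.fire) {
    counter := Mux(io.first, io.beats1, counter1)
  }
}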
module TLMonitor_44( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [127:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [127:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready = 1'h1; // @[Monitor.scala:36:7] wire _source_ok_T = 1'h1; // @[Parameters.scala:46:9] wire _source_ok_WIRE_0 = 1'h1; // @[Parameters.scala:1138:31] wire mask_sub_sub_sub_sub_0_1 = 1'h1; // @[Misc.scala:206:21] wire mask_sub_sub_sub_0_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_sub_sub_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_sub_size = 1'h1; // @[Misc.scala:209:26] wire mask_sub_sub_0_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_sub_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_sub_2_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_sub_3_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_0_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_2_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_3_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_4_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_5_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_6_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_7_1 = 1'h1; // @[Misc.scala:215:29] wire mask_size = 1'h1; // @[Misc.scala:209:26] wire mask_acc = 1'h1; // @[Misc.scala:215:29] wire mask_acc_1 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_2 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_3 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_4 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_5 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_6 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_7 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_8 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_9 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_10 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_11 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_12 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_13 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_14 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_15 = 1'h1; // @[Misc.scala:215:29] wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:46:9] 
wire _source_ok_WIRE_1_0 = 1'h1; // @[Parameters.scala:1138:31] wire sink_ok = 1'h1; // @[Monitor.scala:309:31] wire _a_first_beats1_opdata_T = 1'h1; // @[Edges.scala:92:37] wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire a_first_last = 1'h1; // @[Edges.scala:232:33] wire _a_first_beats1_opdata_T_1 = 1'h1; // @[Edges.scala:92:37] wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire _same_cycle_resp_T_2 = 1'h1; // @[Monitor.scala:684:113] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire _same_cycle_resp_T_8 = 1'h1; // @[Monitor.scala:795:113] wire io_in_a_bits_source = 1'h0; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_source = 1'h0; // @[Monitor.scala:36:7] wire mask_sub_sub_sub_size = 1'h0; // @[Misc.scala:209:26] wire _mask_sub_sub_sub_acc_T = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_sub_sub_acc_T_1 = 1'h0; // @[Misc.scala:215:38] wire mask_sub_size = 1'h0; // @[Misc.scala:209:26] wire _mask_sub_acc_T = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_1 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_2 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_3 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_4 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_5 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_6 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_7 = 1'h0; // @[Misc.scala:215:38] wire a_first_beats1_opdata = 1'h0; // @[Edges.scala:92:28] wire a_first_beats1_opdata_1 = 1'h0; // @[Edges.scala:92:28] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire c_set = 1'h0; // @[Monitor.scala:738:34] wire c_set_wo_ready = 1'h0; // @[Monitor.scala:739:34] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire 
_c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // 
@[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [7:0] a_first_beats1 = 8'h0; // @[Edges.scala:221:14] wire [7:0] a_first_count = 8'h0; // @[Edges.scala:234:25] wire [7:0] a_first_beats1_1 = 8'h0; // @[Edges.scala:221:14] wire [7:0] a_first_count_1 = 8'h0; // @[Edges.scala:234:25] wire [7:0] c_first_beats1_decode = 8'h0; // @[Edges.scala:220:59] wire [7:0] c_first_beats1 = 8'h0; // @[Edges.scala:221:14] wire [7:0] _c_first_count_T = 8'h0; // @[Edges.scala:234:27] wire [7:0] c_first_count = 8'h0; // @[Edges.scala:234:25] wire [7:0] _c_first_counter_T = 8'h0; // @[Edges.scala:236:21] wire [7:0] c_sizes_set = 8'h0; // @[Monitor.scala:741:34] wire [7:0] mask_lo = 8'hFF; // @[Misc.scala:222:10] wire [7:0] mask_hi = 8'hFF; // @[Misc.scala:222:10] wire [7:0] c_first_counter1 = 8'hFF; // @[Edges.scala:230:28] wire [8:0] _c_first_counter1_T = 9'h1FF; // 
@[Edges.scala:230:28] wire [3:0] io_in_a_bits_size = 4'h6; // @[Monitor.scala:36:7] wire [3:0] _mask_sizeOH_T = 4'h6; // @[Misc.scala:202:34] wire [2:0] io_in_a_bits_param = 3'h0; // @[Monitor.scala:36:7] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // 
@[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] io_in_a_bits_opcode = 3'h4; // @[Monitor.scala:36:7] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [15:0] io_in_a_bits_mask = 16'hFFFF; // @[Monitor.scala:36:7] wire [15:0] mask = 16'hFFFF; // @[Misc.scala:222:10] wire [127:0] io_in_a_bits_data = 128'h0; // @[Monitor.scala:36:7] wire [127:0] _c_first_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_first_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_first_WIRE_2_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_first_WIRE_3_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_set_wo_ready_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_set_wo_ready_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_set_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_set_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_opcodes_set_interm_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_opcodes_set_interm_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_sizes_set_interm_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_sizes_set_interm_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_opcodes_set_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_opcodes_set_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_sizes_set_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_sizes_set_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_probe_ack_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_probe_ack_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _c_probe_ack_WIRE_2_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _c_probe_ack_WIRE_3_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _same_cycle_resp_WIRE_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _same_cycle_resp_WIRE_1_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _same_cycle_resp_WIRE_2_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _same_cycle_resp_WIRE_3_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [127:0] _same_cycle_resp_WIRE_4_bits_data = 128'h0; // @[Bundles.scala:265:74] wire [127:0] _same_cycle_resp_WIRE_5_bits_data = 128'h0; // @[Bundles.scala:265:61] wire [31:0] 
_c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [3:0] _a_opcode_lookup_T = 4'h0; // @[Monitor.scala:637:69] wire [3:0] _a_size_lookup_T = 4'h0; // @[Monitor.scala:641:65] wire [3:0] _a_opcodes_set_T = 4'h0; // @[Monitor.scala:659:79] wire [3:0] _a_sizes_set_T = 4'h0; // @[Monitor.scala:660:77] wire [3:0] _d_opcodes_clr_T_4 = 4'h0; // @[Monitor.scala:680:101] wire [3:0] _d_sizes_clr_T_4 = 4'h0; // @[Monitor.scala:681:99] wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] c_opcodes_set = 4'h0; // @[Monitor.scala:740:34] wire [3:0] _c_opcode_lookup_T = 4'h0; // @[Monitor.scala:749:69] wire [3:0] _c_size_lookup_T = 4'h0; // @[Monitor.scala:750:67] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // 
@[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_T = 4'h0; // @[Monitor.scala:767:79] wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_T = 4'h0; // @[Monitor.scala:768:77] wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _d_opcodes_clr_T_10 = 4'h0; // @[Monitor.scala:790:101] wire [3:0] _d_sizes_clr_T_10 = 4'h0; // @[Monitor.scala:791:99] wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [30:0] _d_sizes_clr_T_5 = 31'hFF; // @[Monitor.scala:681:74] wire [30:0] _d_sizes_clr_T_11 = 31'hFF; // @[Monitor.scala:791:74] wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57] wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57] wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51] wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117] wire [3:0] _a_opcodes_set_interm_T = 4'h8; // @[Monitor.scala:657:53] wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48] wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119] wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48] wire [3:0] mask_lo_lo = 4'hF; // @[Misc.scala:222:10] wire [3:0] mask_lo_hi = 4'hF; // @[Misc.scala:222:10] wire [3:0] mask_hi_lo = 4'hF; // @[Misc.scala:222:10] wire [3:0] mask_hi_hi = 4'hF; // @[Misc.scala:222:10] wire [30:0] _d_opcodes_clr_T_5 = 31'hF; // @[Monitor.scala:680:76] wire [30:0] _d_opcodes_clr_T_11 = 31'hF; // @[Monitor.scala:790:76] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] 
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [3:0] _mask_sizeOH_T_1 = 4'h4; // @[OneHot.scala:65:12] wire [3:0] _mask_sizeOH_T_2 = 4'h4; // @[OneHot.scala:65:27] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [1:0] _a_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _a_set_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _c_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _c_set_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_wo_ready_T_1 = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_T_1 = 2'h1; // @[OneHot.scala:58:35] wire [19:0] _c_sizes_set_T_1 = 20'h0; // @[Monitor.scala:768:52] wire [18:0] _c_opcodes_set_T_1 = 19'h0; // @[Monitor.scala:767:54] wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59] wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40] wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [4:0] _a_sizes_set_interm_T_1 = 5'hD; // @[Monitor.scala:658:59] wire [4:0] _a_sizes_set_interm_T = 5'hC; // @[Monitor.scala:658:51] wire [3:0] _a_opcodes_set_interm_T_1 = 4'h9; // @[Monitor.scala:657:61] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [7:0] a_first_beats1_decode = 8'h3; // @[Edges.scala:220:59] wire [7:0] a_first_beats1_decode_1 = 8'h3; // @[Edges.scala:220:59] wire [11:0] is_aligned_mask = 12'h3F; // @[package.scala:243:46] wire [11:0] _a_first_beats1_decode_T_2 = 12'h3F; // @[package.scala:243:46] wire [11:0] _a_first_beats1_decode_T_5 = 12'h3F; // @[package.scala:243:46] wire [11:0] _is_aligned_mask_T_1 = 12'hFC0; // @[package.scala:243:76] wire [11:0] _a_first_beats1_decode_T_1 = 12'hFC0; // @[package.scala:243:76] wire [11:0] _a_first_beats1_decode_T_4 = 12'hFC0; // @[package.scala:243:76] wire [26:0] _is_aligned_mask_T = 27'h3FFC0; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T = 27'h3FFC0; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T_3 = 27'h3FFC0; // @[package.scala:243:71] wire [1:0] mask_lo_lo_lo = 2'h3; // 
@[Misc.scala:222:10] wire [1:0] mask_lo_lo_hi = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_lo_hi_lo = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_lo_hi_hi = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo_lo = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo_hi = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi_hi_lo = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi_hi_hi = 2'h3; // @[Misc.scala:222:10] wire [3:0] mask_sizeOH = 4'h5; // @[Misc.scala:202:81] wire [1:0] mask_sizeOH_shiftAmount = 2'h2; // @[OneHot.scala:64:49] wire _d_first_T = io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T_1 = io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T_2 = io_in_d_valid_0; // @[Decoupled.scala:51:35] wire [31:0] _is_aligned_T = {26'h0, io_in_a_bits_address_0[5:0]}; // @[Monitor.scala:36:7] wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}] wire mask_sub_sub_sub_bit = io_in_a_bits_address_0[3]; // @[Misc.scala:210:26] wire mask_sub_sub_sub_1_2 = mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_sub_nbit = ~mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_sub_0_2 = mask_sub_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_sub_0_2 & mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_0_2; // @[Misc.scala:214:27, :215:38] wire mask_sub_sub_1_2 = mask_sub_sub_sub_0_2 & mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_1_2; // @[Misc.scala:214:27, :215:38] wire mask_sub_sub_2_2 = mask_sub_sub_sub_1_2 & mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T_2 = mask_sub_sub_2_2; // @[Misc.scala:214:27, :215:38] wire mask_sub_sub_3_2 = mask_sub_sub_sub_1_2 & mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_sub_acc_T_3 = mask_sub_sub_3_2; // @[Misc.scala:214:27, :215:38] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_4_2 = mask_sub_sub_2_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire mask_sub_5_2 = mask_sub_sub_2_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_6_2 = mask_sub_sub_3_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire mask_sub_7_2 = mask_sub_sub_3_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_eq; // @[Misc.scala:214:27, :215:38] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_eq_1; // @[Misc.scala:214:27, :215:38] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_eq_2; // @[Misc.scala:214:27, :215:38] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire 
_mask_acc_T_3 = mask_eq_3; // @[Misc.scala:214:27, :215:38] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_eq_4; // @[Misc.scala:214:27, :215:38] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_eq_5; // @[Misc.scala:214:27, :215:38] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_eq_6; // @[Misc.scala:214:27, :215:38] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_eq_7; // @[Misc.scala:214:27, :215:38] wire mask_eq_8 = mask_sub_4_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_8 = mask_eq_8; // @[Misc.scala:214:27, :215:38] wire mask_eq_9 = mask_sub_4_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_9 = mask_eq_9; // @[Misc.scala:214:27, :215:38] wire mask_eq_10 = mask_sub_5_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_10 = mask_eq_10; // @[Misc.scala:214:27, :215:38] wire mask_eq_11 = mask_sub_5_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_11 = mask_eq_11; // @[Misc.scala:214:27, :215:38] wire mask_eq_12 = mask_sub_6_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_12 = mask_eq_12; // @[Misc.scala:214:27, :215:38] wire mask_eq_13 = mask_sub_6_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_13 = mask_eq_13; // @[Misc.scala:214:27, :215:38] wire mask_eq_14 = mask_sub_7_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_14 = mask_eq_14; // @[Misc.scala:214:27, :215:38] wire mask_eq_15 = mask_sub_7_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_15 = mask_eq_15; // @[Misc.scala:214:27, :215:38] wire _T_1222 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1222; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1222; // @[Decoupled.scala:51:35] wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35] reg [7:0] a_first_counter; // @[Edges.scala:229:27] wire [8:0] _a_first_counter1_T = {1'h0, a_first_counter} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] a_first_counter1 = _a_first_counter1_T[7:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 8'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 8'h1; // @[Edges.scala:229:27, :232:25] wire [7:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [7:0] _a_first_counter_T = a_first ? 
8'h0 : a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [31:0] address; // @[Monitor.scala:391:22] wire [26:0] _GEN = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN; // @[package.scala:243:71] wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [7:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:4]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [7:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 8'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [7:0] d_first_counter; // @[Edges.scala:229:27] wire [8:0] _d_first_counter1_T = {1'h0, d_first_counter} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] d_first_counter1 = _d_first_counter1_T[7:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 8'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 8'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 8'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [7:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [7:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg [3:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [1:0] inflight; // @[Monitor.scala:614:27] reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35] wire [3:0] _a_opcode_lookup_T_1 = inflight_opcodes; // @[Monitor.scala:616:35, :637:44] reg [7:0] inflight_sizes; // @[Monitor.scala:618:33] wire [7:0] _a_size_lookup_T_1 = inflight_sizes; // @[Monitor.scala:618:33, :641:40] wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35] reg [7:0] a_first_counter_1; // @[Edges.scala:229:27] wire [8:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] a_first_counter1_1 = _a_first_counter1_T_1[7:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 8'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 8'h1; // @[Edges.scala:229:27, :232:25] wire [7:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [7:0] _a_first_counter_T_1 = a_first_1 ? 
8'h0 : a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [7:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:4]; // @[package.scala:243:46] wire [7:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 8'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [7:0] d_first_counter_1; // @[Edges.scala:229:27] wire [8:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] d_first_counter1_1 = _d_first_counter1_T_1[7:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 8'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 8'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 8'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [7:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [7:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire a_set; // @[Monitor.scala:626:34] wire a_set_wo_ready; // @[Monitor.scala:627:34] wire [3:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [7:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [15:0] _a_opcode_lookup_T_6 = {12'h0, _a_opcode_lookup_T_1}; // @[Monitor.scala:637:{44,97}] wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [7:0] a_size_lookup; // @[Monitor.scala:639:33] wire [15:0] _a_size_lookup_T_6 = {8'h0, _a_size_lookup_T_1}; // @[Monitor.scala:641:{40,91}] wire [15:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[15:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _T_1145 = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26] assign a_set_wo_ready = _T_1145; // @[Monitor.scala:627:34, :651:26] wire _same_cycle_resp_T; // @[Monitor.scala:684:44] assign _same_cycle_resp_T = _T_1145; // @[Monitor.scala:651:26, :684:44] assign a_set = _T_1222 & a_first_1; // @[Decoupled.scala:51:35] assign a_opcodes_set_interm = a_set ? 4'h9 : 4'h0; // @[Monitor.scala:626:34, :646:40, :655:70, :657:28] assign a_sizes_set_interm = a_set ? 5'hD : 5'h0; // @[Monitor.scala:626:34, :648:38, :655:70, :658:28] wire [18:0] _a_opcodes_set_T_1 = {15'h0, a_opcodes_set_interm}; // @[package.scala:243:71] assign a_opcodes_set = a_set ? _a_opcodes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:626:34, :630:33, :655:70, :659:{28,54}] wire [19:0] _a_sizes_set_T_1 = {15'h0, a_sizes_set_interm}; // @[package.scala:243:71] assign a_sizes_set = a_set ? 
_a_sizes_set_T_1[7:0] : 8'h0; // @[Monitor.scala:626:34, :632:31, :655:70, :660:{28,52}] wire d_clr; // @[Monitor.scala:664:34] wire d_clr_wo_ready; // @[Monitor.scala:665:34] wire [3:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [7:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_0 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_0; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_0; // @[Monitor.scala:673:46, :783:46] wire _T_1194 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] assign d_clr_wo_ready = _T_1194 & ~d_release_ack; // @[Monitor.scala:665:34, :673:46, :674:{26,71,74}] assign d_clr = io_in_d_valid_0 & d_first_1 & ~d_release_ack; // @[Monitor.scala:36:7, :664:34, :673:46, :674:74, :678:{25,70}] assign d_opcodes_clr = {4{d_clr}}; // @[Monitor.scala:664:34, :668:33, :678:89, :680:21] assign d_sizes_clr = {8{d_clr}}; // @[Monitor.scala:664:34, :670:31, :678:89, :681:21] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire same_cycle_resp = _same_cycle_resp_T_1; // @[Monitor.scala:684:{55,88}] wire [1:0] _inflight_T = {inflight[1], inflight[0] | a_set}; // @[Monitor.scala:614:27, :626:34, :705:27] wire _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [1:0] _inflight_T_2 = {1'h0, _inflight_T[0] & _inflight_T_1}; // @[Monitor.scala:705:{27,36,38}] wire [3:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [3:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [3:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [7:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [7:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [7:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [1:0] inflight_1; // @[Monitor.scala:726:35] wire [1:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [3:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [3:0] _c_opcode_lookup_T_1 = inflight_opcodes_1; // @[Monitor.scala:727:35, :749:44] wire [3:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [7:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [7:0] _c_size_lookup_T_1 = inflight_sizes_1; // @[Monitor.scala:728:35, :750:42] wire [7:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [7:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:4]; // @[package.scala:243:46] wire [7:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? 
d_first_beats1_decode_2 : 8'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [7:0] d_first_counter_2; // @[Edges.scala:229:27] wire [8:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] d_first_counter1_2 = _d_first_counter1_T_2[7:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 8'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 8'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 8'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [7:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [7:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [7:0] c_size_lookup; // @[Monitor.scala:748:35] wire [15:0] _c_opcode_lookup_T_6 = {12'h0, _c_opcode_lookup_T_1}; // @[Monitor.scala:749:{44,97}] wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [15:0] _c_size_lookup_T_6 = {8'h0, _c_size_lookup_T_1}; // @[Monitor.scala:750:{42,93}] wire [15:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[15:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire d_clr_1; // @[Monitor.scala:774:34] wire d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [3:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [7:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1266 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1266 & d_release_ack_1; // @[Monitor.scala:775:34, :783:46, :784:{26,71}] assign d_clr_1 = io_in_d_valid_0 & d_first_2 & d_release_ack_1; // @[Monitor.scala:36:7, :774:34, :783:46, :788:{25,70}] assign d_opcodes_clr_1 = {4{d_clr_1}}; // @[Monitor.scala:774:34, :776:34, :788:88, :790:21] assign d_sizes_clr_1 = {8{d_clr_1}}; // @[Monitor.scala:774:34, :777:34, :788:88, :791:21] wire _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [1:0] _inflight_T_5 = {1'h0, _inflight_T_3[0] & _inflight_T_4}; // @[Monitor.scala:814:{35,44,46}] wire [3:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [3:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [7:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [7:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.tilelink

import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg

case class TLMonitorArgs(edge: TLEdge)

abstract class TLMonitorBase(args: TLMonitorArgs) extends Module {
  val io = IO(new Bundle {
    val in = Input(new TLBundle(args.edge.bundle))
  })
  def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
  legalize(io.in, args.edge, reset)
}

object TLMonitor {
  def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
    if (enable) {
      EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
    } else { node }
  }
}

class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) {
  require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))

  val cover_prop_class = PropertyClass.Default

  //Like assert but can flip to being an assumption for formal verification
  def monAssert(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) {
      assert(cond, message)
    } else {
      Property(monitorDir, cond, message, PropertyClass.Default)
    }

  def assume(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) {
      assert(cond, message)
    } else {
      Property(monitorDir.flip, cond, message, PropertyClass.Default)
    }

  def extra = {
    args.edge.sourceInfo match {
      case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
      case _ => ""
    }
  }

  def visible(address: UInt, source: UInt, edge: TLEdge) =
    edge.client.clients.map { c =>
      !c.sourceId.contains(source) ||
      c.visibility.map(_.contains(address)).reduce(_ || _)
    }.reduce(_ && _)

  def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
    //switch this flag to turn on diplomacy in error messages
    def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"

    monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)

    // Reuse these subexpressions to save some firrtl lines
    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)

    monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")

    //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
    //TODO: check for acquireT?
    when (bundle.opcode === TLMessages.AcquireBlock) {
      monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
      monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
      monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
      monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.AcquirePerm) {
      monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
      monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
      monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
      monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
      monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.Get) {
      monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
      monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.PutFullData) {
      monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.PutPartialData) {
      monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
      monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.ArithmeticData) {
      monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
      monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.LogicalData) {
      monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
      monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.Hint) {
      monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
      monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
    }
  }

  def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
    monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
    monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")

    // Reuse these subexpressions to save some firrtl lines
    val address_ok = edge.manager.containsSafe(edge.address(bundle))
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)
    val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source

    when (bundle.opcode === TLMessages.Probe) {
      assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
      assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
      assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
      assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
      assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
      assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
      assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.Get) {
      monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.PutFullData) {
      monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.PutPartialData) {
      monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
      monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.ArithmeticData) {
      monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
      monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
      monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.LogicalData) {
      monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
      monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
      monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.Hint) {
      monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
      monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
      monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
    }
  }

  def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
    monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)

    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val address_ok = edge.manager.containsSafe(edge.address(bundle))

    monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")

    when (bundle.opcode === TLMessages.ProbeAck) {
      monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
      monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.ProbeAckData) {
      monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
    }

    when (bundle.opcode === TLMessages.Release) {
      monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
      monAssert (source_ok, "'C' channel
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed version of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message " + "is already pending (not received until current cycle) for a prior request message " + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the " + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the " + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } // This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
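    // The opcode and size histories below follow the same set/clear protocol as `inflight`:
    // each source ID owns one lane holding (opcode << 1 | 1) and (size << 1 | 1), so an
    // all-zero lane means that source has no outstanding request (see the "+1" comments above).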
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
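    // Log-depth barrel rotate: stage i applies rotateRight(1 << i) only when bit i of the
    // width-padded amount is set, e.g. for an 8-bit value, n = 5 (binary 101) rotates by 1 and then by 4.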
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s from high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask marks the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
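// Worked example (illustrative comment only; `dev` is a hypothetical value, not part of the original file):
//   val dev = AddressSet(0x1000, 0xf0f)
//   dev.contains(0x1104)  // true: (0x1104 ^ 0x1000) & ~0xf0f == 0, i.e. 0x1104 lies in 0x1100-0x110f
//   dev.contains(0x1010)  // false: bit 4 differs from the base and is not covered by the mask
//   dev.alignment         // 0x10, the size of each contiguous granule
//   dev.contiguous        // false, because the mask has a hole at bits 4-7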
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
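// The assignments below populate a B-channel LogicalData request; `legal` reports whether the
// targeted client supports logical AMOs at this transfer size, so callers should gate sending on it.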
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
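// Minimal usage sketch (comment only; `edge`, `in`, and `regData` are hypothetical and not defined in this file).
// It shows how a manager might use the TLEdgeIn response helpers above to answer single-beat Gets and Puts:
//   in.a.ready := in.d.ready
//   in.d.valid := in.a.valid
//   in.d.bits  := Mux(edge.hasData(in.a.bits),
//                     edge.AccessAck(in.a.bits),          // PutFull/PutPartial -> AccessAck
//                     edge.AccessAck(in.a.bits, regData)) // Get -> AccessAckData carrying regData
// The AccessAck helpers echo the request's source and size, so the D response is routed back to the requester.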
module TLMonitor_45( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [10:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [25:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [10:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [1:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [10:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [25:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [10:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10] wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire a_first_count = 1'h0; // @[Edges.scala:234:25] wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire d_first_count = 1'h0; // @[Edges.scala:234:25] wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59] wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14] wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25] wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14] wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready 
= 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27] wire c_first_count = 1'h0; // @[Edges.scala:234:25] wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21] wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14] wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire 
_c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67] wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire a_first_last = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire d_first_last = 1'h1; // @[Edges.scala:232:33] wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire 
d_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43] wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33] wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28] wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7] wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; 
// @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [25:0] _c_first_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_first_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_first_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_first_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_set_wo_ready_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_set_wo_ready_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_opcodes_set_interm_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_opcodes_set_interm_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_sizes_set_interm_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_sizes_set_interm_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_opcodes_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_opcodes_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_sizes_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_sizes_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_probe_ack_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_probe_ack_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_probe_ack_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_probe_ack_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _same_cycle_resp_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _same_cycle_resp_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _same_cycle_resp_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _same_cycle_resp_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _same_cycle_resp_WIRE_4_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _same_cycle_resp_WIRE_5_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [10:0] _c_first_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_first_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_first_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_first_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_set_wo_ready_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_set_wo_ready_WIRE_1_bits_source = 11'h0; // 
@[Bundles.scala:265:61] wire [10:0] _c_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_opcodes_set_interm_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_opcodes_set_interm_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_sizes_set_interm_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_sizes_set_interm_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_opcodes_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_opcodes_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_sizes_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_sizes_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_probe_ack_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_probe_ack_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_probe_ack_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_probe_ack_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _same_cycle_resp_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _same_cycle_resp_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _same_cycle_resp_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _same_cycle_resp_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _same_cycle_resp_WIRE_4_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _same_cycle_resp_WIRE_5_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_beats1_decode_T_2 = 3'h0; // @[package.scala:243:46] wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 
3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 
17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [16385:0] _c_sizes_set_T_1 = 16386'h0; // @[Monitor.scala:768:52] wire [13:0] _c_opcodes_set_T = 14'h0; // @[Monitor.scala:767:79] wire [13:0] _c_sizes_set_T = 14'h0; // @[Monitor.scala:768:77] wire [16386:0] _c_opcodes_set_T_1 = 16387'h0; // @[Monitor.scala:767:54] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [2047:0] _c_set_wo_ready_T = 2048'h1; // @[OneHot.scala:58:35] wire [2047:0] _c_set_T = 2048'h1; // @[OneHot.scala:58:35] wire [4159:0] c_opcodes_set = 4160'h0; // @[Monitor.scala:740:34] wire [4159:0] c_sizes_set = 4160'h0; // @[Monitor.scala:741:34] wire [1039:0] c_set = 1040'h0; // @[Monitor.scala:738:34] wire [1039:0] c_set_wo_ready = 1040'h0; // @[Monitor.scala:739:34] wire [2:0] _c_first_beats1_decode_T_1 = 3'h7; // @[package.scala:243:76] wire [5:0] _c_first_beats1_decode_T = 6'h7; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [10:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_4 = 
io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_4 = source_ok_uncommonBits < 11'h410; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20] wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31] wire [5:0] _GEN = 6'h7 << io_in_a_bits_size_0; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [5:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [5:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [2:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [25:0] _is_aligned_T = {23'h0, io_in_a_bits_address_0[2:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 26'h0; // @[Edges.scala:21:{16,24}] wire [2:0] _mask_sizeOH_T = {1'h0, io_in_a_bits_size_0}; // @[Misc.scala:202:34] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = &io_in_a_bits_size_0; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // 
@[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [10:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}] wire [10:0] 
uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}] wire [10:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_10 = source_ok_uncommonBits_1 < 11'h410; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20] wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31] wire _T_665 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_665; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_665; // @[Decoupled.scala:51:35] wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] reg a_first_counter; // @[Edges.scala:229:27] wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25] wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28] wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28] wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25] wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [1:0] size; // @[Monitor.scala:389:22] reg [10:0] source; // @[Monitor.scala:390:22] reg [25:0] address; // @[Monitor.scala:391:22] wire _T_733 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_733; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_733; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_733; // @[Decoupled.scala:51:35] wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35] wire [5:0] _GEN_0 = 6'h7 << io_in_d_bits_size_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [2:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire d_first_beats1_opdata = 
io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] reg d_first_counter; // @[Edges.scala:229:27] wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28] wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] size_1; // @[Monitor.scala:540:22] reg [10:0] source_1; // @[Monitor.scala:541:22] reg [1039:0] inflight; // @[Monitor.scala:614:27] reg [4159:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [4159:0] inflight_sizes; // @[Monitor.scala:618:33] wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] reg a_first_counter_1; // @[Edges.scala:229:27] wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25] wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28] wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28] wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25] wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] reg d_first_counter_1; // @[Edges.scala:229:27] wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28] wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire [1039:0] a_set; // @[Monitor.scala:626:34] wire [1039:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [4159:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [4159:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [13:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [13:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [13:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [13:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire 
[13:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [13:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [13:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [13:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [13:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [4159:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [4159:0] _a_opcode_lookup_T_6 = {4156'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [4159:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[4159:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [4159:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [4159:0] _a_size_lookup_T_6 = {4156'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [4159:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[4159:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [2047:0] _GEN_2 = 2048'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [2047:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [2047:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35] wire _T_598 = _T_665 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_598 ? _a_set_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [2:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [2:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[2:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 3'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [13:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [13:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [13:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [16386:0] _a_opcodes_set_T_1 = {16383'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_598 ? 
_a_opcodes_set_T_1[4159:0] : 4160'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [16385:0] _a_sizes_set_T_1 = {16383'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[4159:0] : 4160'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [1039:0] d_clr; // @[Monitor.scala:664:34] wire [1039:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [4159:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [4159:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [2047:0] _GEN_5 = 2048'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [2047:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [2047:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [2047:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [2047:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35] wire _T_613 = _T_733 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_613 ? _d_clr_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35] wire [16398:0] _d_opcodes_clr_T_5 = 16399'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[4159:0] : 4160'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [16398:0] _d_sizes_clr_T_5 = 16399'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_613 ? 
_d_sizes_clr_T_5[4159:0] : 4160'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [1039:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [1039:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [1039:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [4159:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [4159:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [4159:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [4159:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [4159:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [4159:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [1039:0] inflight_1; // @[Monitor.scala:726:35] wire [1039:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [4159:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [4159:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [4159:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [4159:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] reg d_first_counter_2; // @[Edges.scala:229:27] wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28] wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [4159:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [4159:0] _c_opcode_lookup_T_6 = {4156'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [4159:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[4159:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [4159:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [4159:0] _c_size_lookup_T_6 = {4156'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [4159:0] _c_size_lookup_T_7 = {1'h0, 
_c_size_lookup_T_6[4159:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [1039:0] d_clr_1; // @[Monitor.scala:774:34] wire [1039:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [4159:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [4159:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_709 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_709 & d_release_ack_1 ? _d_clr_wo_ready_T_1[1039:0] : 1040'h0; // @[OneHot.scala:58:35] wire _T_691 = _T_733 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_691 ? _d_clr_T_1[1039:0] : 1040'h0; // @[OneHot.scala:58:35] wire [16398:0] _d_opcodes_clr_T_11 = 16399'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_691 ? _d_opcodes_clr_T_11[4159:0] : 4160'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [16398:0] _d_sizes_clr_T_11 = 16399'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_691 ? _d_sizes_clr_T_11[4159:0] : 4160'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 11'h0; // @[Monitor.scala:36:7, :795:113] wire [1039:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [1039:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [4159:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [4159:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [4159:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [4159:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
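The monitor logic emitted above tracks outstanding TileLink transactions with one-hot bitmaps indexed by source ID: on an A-channel fire a one-hot a_set mask (2048'h1 << source) is ORed into the inflight register, and on the matching D-channel fire a d_clr mask clears that bit, with parallel inflight_opcodes / inflight_sizes tables holding per-source metadata. The sketch below is a minimal Chisel illustration of that set/clear idiom only, using assumed names (InflightTracker, nSources) and omitting the opcode/size side tables and the assertion checks; it shows the pattern, not the actual TLMonitor implementation.

import chisel3._
import chisel3.util._

// Minimal sketch (assumed names) of the inflight-tracking idiom seen in the
// generated monitor: set a one-hot bit when an A-channel beat fires, clear it
// when the matching D-channel response fires, keyed by the transaction source.
class InflightTracker(nSources: Int) extends Module {
  val io = IO(new Bundle {
    val a_fire   = Input(Bool())
    val a_source = Input(UInt(log2Ceil(nSources).W))
    val d_fire   = Input(Bool())
    val d_source = Input(UInt(log2Ceil(nSources).W))
    val inflight = Output(UInt(nSources.W))
  })

  val inflight = RegInit(0.U(nSources.W))

  // One-hot set/clear masks, analogous to a_set / d_clr in the monitor above.
  val a_set = Mux(io.a_fire, UIntToOH(io.a_source, nSources), 0.U(nSources.W))
  val d_clr = Mux(io.d_fire, UIntToOH(io.d_source, nSources), 0.U(nSources.W))

  // Same update chain as the generated (inflight | a_set) & ~d_clr logic.
  inflight := (inflight | a_set) & ~d_clr
  io.inflight := inflight
}

In the generated Verilog this corresponds to the _inflight_T = inflight | a_set and & ~d_clr chain, with the bitmap sized to the full source-ID space (1040 bits in this instance).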
Generate the Verilog code corresponding to the following Chisel files. File decode.scala: //****************************************************************************** // Copyright (c) 2015 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ package boom.v4.exu import chisel3._ import chisel3.util._ import freechips.rocketchip.tile.FPConstants import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket.Instructions32 import freechips.rocketchip.rocket.CustomInstructions._ import freechips.rocketchip.rocket.RVCExpander import freechips.rocketchip.rocket.ALU._ import freechips.rocketchip.rocket.{CSR, Causes, DecodeLogic} import freechips.rocketchip.util._ import boom.v4.common._ import boom.v4.util._ // scalastyle:off /** * Abstract trait giving defaults and other relevant values to different Decode constants/ */ object DecodeTables extends freechips.rocketchip.rocket.constants.ScalarOpConstants with freechips.rocketchip.rocket.constants.MemoryOpConstants with freechips.rocketchip.tile.HasFPUParameters { lazy val fLen = 64 lazy val minFLen = 32 def xLen = 64 def xpr64 = Y // TODO inform this from xLen def DC(i: Int) = BitPat.dontCare(i) def fc2oh(fc: Int): UInt = (1 << fc).U(FC_SZ.W) // FP stores generate data through FP F2I, and generate address through MemAddrCalc def FCOH_F2IMEM = ((1 << FC_AGEN) | (1 << FC_F2I )).U(FC_SZ.W) def FCOH_STORE = ((1 << FC_AGEN) | (1 << FC_DGEN)).U(FC_SZ.W) def FN_00 = BitPat("b???00") def FN_01 = BitPat("b???01") def FN_10 = BitPat("b???10") def FN_11 = BitPat("b???11") def decode_default: List[BitPat] = // frs3_en // is val inst? | imm sel // | is fp inst? | | uses_ldq // | | rs1 regtype | | | uses_stq is unique? 
(clear pipeline for it) // | | | rs2 type| | | | is_amo | flush on commit // | | func unit | | | | | | | | | csr cmd // | | | | | | | | | | | | | fcn_dw swap12 fma // | | | dst | | | | | | | mem | | | | fcn_op | swap32 | div // | | | regtype | | | | | | | cmd | | | | | | | typeTagIn | | sqrt // | | | | | | | | | | | | | | | | | ldst | | | typeTagOut | | wflags // | | | | | | | | | | | | | | | | | | wen | | | | from_int | | | // | | | | | | | | | | | | | | | | | | | ren1 | | | | | to_int | | | // | | | | | | | | | | | | | | | | | | | | ren2 | | | | | | fast | | | // | | | | | | | | | | | | | | | | | | | | | ren3 | | | | | | | | | | // | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | List(N, N, DC(FC_SZ) , RT_X , DC(2) , DC(2) , X, IS_N, X, X, X, M_X, N, X, CSR.X, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X) def X32_table: Seq[(BitPat, List[BitPat])] = { import Instructions32._; Seq( SLLI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SL , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SRLI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SRAI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SRA , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X) ) } def X64_table: Seq[(BitPat, List[BitPat])] = Seq( LD -> List(Y, N, fc2oh(FC_AGEN), RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, M_XRD , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), LWU -> List(Y, N, fc2oh(FC_AGEN), RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, M_XRD , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SD -> List(Y, N, FCOH_STORE , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, M_XWR , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SLLI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SL , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SRLI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SRAI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SRA , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ADDIW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SLLIW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_SL , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SRAIW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_SRA , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SRLIW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_SR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ADDW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SUBW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_SUB , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SLLW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_SL , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SRAW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_SRA , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SRLW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, 
M_X , N, N, CSR.N, DW_32 , FN_SR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X) ) def X_table: Seq[(BitPat, List[BitPat])] = Seq( LW -> List(Y, N, fc2oh(FC_AGEN), RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, M_XRD , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), LH -> List(Y, N, fc2oh(FC_AGEN), RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, M_XRD , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), LHU -> List(Y, N, fc2oh(FC_AGEN), RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, M_XRD , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), LB -> List(Y, N, fc2oh(FC_AGEN), RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, M_XRD , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), LBU -> List(Y, N, fc2oh(FC_AGEN), RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, M_XRD , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SW -> List(Y, N, FCOH_STORE , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, M_XWR , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SH -> List(Y, N, FCOH_STORE , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, M_XWR , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SB -> List(Y, N, FCOH_STORE , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, M_XWR , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), LUI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_X , RT_X , N, IS_U, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ADDI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ANDI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_AND , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ORI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_OR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), XORI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_XOR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SLTI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SLT , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SLTIU -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SLTU, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SLL -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SL , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ADD -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SUB -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SUB , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SLT -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SLT , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SLTU -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SLTU, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AND -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_AND , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), OR -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_OR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), XOR -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_XOR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SRA -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, 
N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SRA , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SRL -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), MUL -> List(Y, N, fc2oh(FC_MUL) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_MUL , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), MULH -> List(Y, N, fc2oh(FC_MUL) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_MULH, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), MULHU -> List(Y, N, fc2oh(FC_MUL) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_MULHU, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), MULHSU -> List(Y, N, fc2oh(FC_MUL) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_MULHSU, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), MULW -> List(Y, N, fc2oh(FC_MUL) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_MUL , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), DIV -> List(Y, N, fc2oh(FC_DIV) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_DIV , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), DIVU -> List(Y, N, fc2oh(FC_DIV) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_DIVU, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), REM -> List(Y, N, fc2oh(FC_DIV) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_REM , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), REMU -> List(Y, N, fc2oh(FC_DIV) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_REMU, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), DIVW -> List(Y, N, fc2oh(FC_DIV) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_DIV , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), DIVUW -> List(Y, N, fc2oh(FC_DIV) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_DIVU, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), REMW -> List(Y, N, fc2oh(FC_DIV) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_REM , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), REMUW -> List(Y, N, fc2oh(FC_DIV) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_32 , FN_REMU, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AUIPC -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_X , RT_X , N, IS_U, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), // use BRU for the PC read JAL -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_X , RT_X , N, IS_J, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), JALR -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BEQ -> List(Y, N, fc2oh(FC_ALU) , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SUB , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BNE -> List(Y, N, fc2oh(FC_ALU) , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SUB , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BGE -> List(Y, N, fc2oh(FC_ALU) , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SLT , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BGEU -> List(Y, N, fc2oh(FC_ALU) , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SLTU, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BLT -> List(Y, N, fc2oh(FC_ALU) , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SLT , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BLTU -> List(Y, N, fc2oh(FC_ALU) , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_SLTU, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), // I-type, the immedia2 holds 
the CSR regi ster. CSRRW -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , Y, Y, CSR.W, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CSRRS -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , Y, Y, CSR.S, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CSRRC -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , Y, Y, CSR.C, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CSRRWI -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_X , RT_X , N, IS_I, N, N, N, M_X , Y, Y, CSR.W, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CSRRSI -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_X , RT_X , N, IS_I, N, N, N, M_X , Y, Y, CSR.S, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CSRRCI -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_X , RT_X , N, IS_I, N, N, N, M_X , Y, Y, CSR.C, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SFENCE_VMA ->List(Y, N, fc2oh(FC_CSR) , RT_X , RT_FIX, RT_FIX, N, IS_N, N, N, N,M_SFENCE , Y, Y, CSR.R, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ECALL -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_X , RT_X , N, IS_I, N, N, N, M_X , Y, Y, CSR.I, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), EBREAK -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_X , RT_X , N, IS_I, N, N, N, M_X , Y, Y, CSR.I, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SRET -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_X , RT_X , N, IS_I, N, N, N, M_X , Y, Y, CSR.I, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), MRET -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_X , RT_X , N, IS_I, N, N, N, M_X , Y, Y, CSR.I, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), DRET -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_X , RT_X , N, IS_I, N, N, N, M_X , Y, Y, CSR.I, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), WFI -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_X , RT_X , N, IS_I, N, N, N, M_X , Y, Y, CSR.I, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), FENCE_I -> List(Y, N, 0.U(FC_SZ.W) , RT_X , RT_X , RT_X , N, IS_N, N, N, N, M_X , Y, Y, CSR.N, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), FENCE -> List(Y, N, 0.U(FC_SZ.W) , RT_X , RT_X , RT_X , N, IS_N, N, Y, N, M_X , Y, Y, CSR.N, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), // TODO PERF make fence higher performance // currently serializes pipeline // A-type AMOADD_W -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_ADD, Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), // TODO make AMOs higherperformance AMOXOR_W -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_XOR, Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOSWAP_W -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_SWAP,Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOAND_W -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_AND, Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOOR_W -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_OR, Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOMIN_W -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_MIN, Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOMINU_W -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_MINU,Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOMAX_W -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_MAX, Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, 
X,X,X,X,X,X,X, X,X,X,X), AMOMAXU_W -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_MAXU,Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOADD_D -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_ADD, Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOXOR_D -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_XOR, Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOSWAP_D -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_SWAP,Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOAND_D -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_AND, Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOOR_D -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_OR, Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOMIN_D -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_MIN, Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOMINU_D -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_MINU,Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOMAX_D -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_MAX, Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), AMOMAXU_D -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XA_MAXU,Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), LR_W -> List(Y, N, fc2oh(FC_AGEN), RT_FIX, RT_FIX, RT_X , N, IS_N, Y, N, N, M_XLR , Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), LR_D -> List(Y, N, fc2oh(FC_AGEN), RT_FIX, RT_FIX, RT_X , N, IS_N, Y, N, N, M_XLR , Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SC_W -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XSC , Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SC_D -> List(Y, N, FCOH_STORE , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, Y, Y, M_XSC , Y, Y, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X) ) def F_table: Seq[(BitPat, List[BitPat])] = Seq( FLW -> List(Y, Y, fc2oh(FC_AGEN), RT_FLT, RT_FIX, RT_X , N, IS_I, Y, N, N, M_XRD , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), FLD -> List(Y, Y, fc2oh(FC_AGEN), RT_FLT, RT_FIX, RT_X , N, IS_I, Y, N, N, M_XRD , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), FSW -> List(Y, Y, FCOH_F2IMEM , RT_X , RT_FIX, RT_FLT, N, IS_S, N, Y, N, M_XWR , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), // sort of a lie; broken into two micro-ops FSD -> List(Y, Y, FCOH_F2IMEM , RT_X , RT_FIX, RT_FLT, N, IS_S, N, Y, N, M_XWR , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), FCLASS_S -> List(Y, Y, fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,S,S,N,Y,N, N,N,N,N), FCLASS_D -> List(Y, Y, fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,D,D,N,Y,N, N,N,N,N), FMV_W_X -> List(Y, Y, fc2oh(FC_I2F) , RT_FLT, RT_FIX, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,N,N,N, X,X,S,D,Y,N,N, N,N,N,N), FMV_D_X -> List(Y, Y, fc2oh(FC_I2F) , RT_FLT, RT_FIX, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,N,N,N, X,X,D,D,Y,N,N, N,N,N,N), FMV_X_W -> List(Y, Y, fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,D,S,N,Y,N, N,N,N,N), FMV_X_D -> List(Y, Y, 
fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,D,D,N,Y,N, N,N,N,N), FSGNJ_S -> List(Y, Y, fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,S,S,N,N,Y, N,N,N,N), FSGNJ_D -> List(Y, Y, fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,D,D,N,N,Y, N,N,N,N), FSGNJX_S -> List(Y, Y, fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,S,S,N,N,Y, N,N,N,N), FSGNJX_D -> List(Y, Y, fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,D,D,N,N,Y, N,N,N,N), FSGNJN_S -> List(Y, Y, fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,S,S,N,N,Y, N,N,N,N), FSGNJN_D -> List(Y, Y, fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,D,D,N,N,Y, N,N,N,N), // FP to FP FCVT_S_D -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,D,S,N,N,Y, N,N,N,Y), FCVT_D_S -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,S,D,N,N,Y, N,N,N,Y), // Int to FP FCVT_S_W -> List(Y, Y,fc2oh(FC_I2F) , RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,N,N,N, X,X,S,S,Y,N,N, N,N,N,Y), FCVT_S_WU -> List(Y, Y,fc2oh(FC_I2F) , RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,N,N,N, X,X,S,S,Y,N,N, N,N,N,Y), FCVT_S_L -> List(Y, Y,fc2oh(FC_I2F) , RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,N,N,N, X,X,S,S,Y,N,N, N,N,N,Y), FCVT_S_LU -> List(Y, Y,fc2oh(FC_I2F) , RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,N,N,N, X,X,S,S,Y,N,N, N,N,N,Y), FCVT_D_W -> List(Y, Y,fc2oh(FC_I2F) , RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,N,N,N, X,X,D,D,Y,N,N, N,N,N,Y), FCVT_D_WU -> List(Y, Y,fc2oh(FC_I2F) , RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,N,N,N, X,X,D,D,Y,N,N, N,N,N,Y), FCVT_D_L -> List(Y, Y,fc2oh(FC_I2F) , RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,N,N,N, X,X,D,D,Y,N,N, N,N,N,Y), FCVT_D_LU -> List(Y, Y,fc2oh(FC_I2F) , RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,N,N,N, X,X,D,D,Y,N,N, N,N,N,Y), // FP to Int FCVT_W_S -> List(Y, Y,fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,S,S,N,Y,N, N,N,N,Y), FCVT_WU_S -> List(Y, Y,fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,S,S,N,Y,N, N,N,N,Y), FCVT_L_S -> List(Y, Y,fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,S,S,N,Y,N, N,N,N,Y), FCVT_LU_S -> List(Y, Y,fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,S,S,N,Y,N, N,N,N,Y), FCVT_W_D -> List(Y, Y,fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,D,D,N,Y,N, N,N,N,Y), FCVT_WU_D -> List(Y, Y,fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,D,D,N,Y,N, N,N,N,Y), FCVT_L_D -> List(Y, Y,fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,D,D,N,Y,N, 
N,N,N,Y), FCVT_LU_D -> List(Y, Y,fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,N, N,X,D,D,N,Y,N, N,N,N,Y), FEQ_S -> List(Y, Y, fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,S,S,N,Y,N, N,N,N,Y), FLT_S -> List(Y, Y, fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,S,S,N,Y,N, N,N,N,Y), FLE_S -> List(Y, Y, fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,S,S,N,Y,N, N,N,N,Y), FEQ_D -> List(Y, Y, fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,D,D,N,Y,N, N,N,N,Y), FLT_D -> List(Y, Y, fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,D,D,N,Y,N, N,N,N,Y), FLE_D -> List(Y, Y, fc2oh(FC_F2I) , RT_FIX, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,D,D,N,Y,N, N,N,N,Y), FMIN_S -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,S,S,N,N,Y, N,N,N,Y), FMAX_S -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,S,S,N,N,Y, N,N,N,Y), FMIN_D -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,D,D,N,N,Y, N,N,N,Y), FMAX_D -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,N, N,N,D,D,N,N,Y, N,N,N,Y), FADD_S -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_00 ,X,X,Y,Y,N, N,Y,S,S,N,N,N, Y,N,N,Y), FSUB_S -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_01 ,X,X,Y,Y,N, N,Y,S,S,N,N,N, Y,N,N,Y), FMUL_S -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_00 ,X,X,Y,Y,N, N,N,S,S,N,N,N, Y,N,N,Y), FADD_D -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_00 ,X,X,Y,Y,N, N,Y,D,D,N,N,N, Y,N,N,Y), FSUB_D -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_01 ,X,X,Y,Y,N, N,Y,D,D,N,N,N, Y,N,N,Y), FMUL_D -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_00 ,X,X,Y,Y,N, N,N,D,D,N,N,N, Y,N,N,Y), FMADD_S -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, Y, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_00 ,X,X,Y,Y,Y, N,N,S,S,N,N,N, Y,N,N,Y), FMSUB_S -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, Y, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_01 ,X,X,Y,Y,Y, N,N,S,S,N,N,N, Y,N,N,Y), FNMADD_S -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, Y, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_11 ,X,X,Y,Y,Y, N,N,S,S,N,N,N, Y,N,N,Y), FNMSUB_S -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, Y, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_10 ,X,X,Y,Y,Y, N,N,S,S,N,N,N, Y,N,N,Y), FMADD_D -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, Y, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_00 ,X,X,Y,Y,Y, N,N,D,D,N,N,N, Y,N,N,Y), FMSUB_D -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, Y, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_01 ,X,X,Y,Y,Y, N,N,D,D,N,N,N, Y,N,N,Y), FNMADD_D -> List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, Y, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_11 ,X,X,Y,Y,Y, N,N,D,D,N,N,N, Y,N,N,Y), FNMSUB_D -> 
List(Y, Y,fc2oh(FC_FPU) , RT_FLT, RT_FLT, RT_FLT, Y, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_10 ,X,X,Y,Y,Y, N,N,D,D,N,N,N, Y,N,N,Y) ) def FDivSqrt_table: Seq[(BitPat, List[BitPat])] = Seq( FDIV_S -> List(Y, Y, fc2oh(FC_FDV) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,X, X,X,S,S,X,X,X, X,Y,N,Y), FDIV_D -> List(Y, Y, fc2oh(FC_FDV) , RT_FLT, RT_FLT, RT_FLT, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,Y,X, X,X,D,D,X,X,X, X,Y,N,Y), FSQRT_S -> List(Y, Y, fc2oh(FC_FDV) , RT_FLT, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,X, X,X,S,S,X,X,X, X,N,Y,Y), FSQRT_D -> List(Y, Y, fc2oh(FC_FDV) , RT_FLT, RT_FLT, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X ,X,X,Y,N,X, X,X,D,D,X,X,X, X,N,Y,Y), ) def B_table: Seq[(BitPat, List[BitPat])] = Seq( SH1ADD -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_F3,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SH2ADD -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_F3,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SH3ADD -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_F3,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SH1ADD_UW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_F3,N, N, N, M_X , N, N, CSR.N, DW_32 , FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SH2ADD_UW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_F3,N, N, N, M_X , N, N, CSR.N, DW_32 , FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SH3ADD_UW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_F3,N, N, N, M_X , N, N, CSR.N, DW_32 , FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ADD_UW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_F3,N, N, N, M_X , N, N, CSR.N, DW_32 , FN_ADD , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SLLI_UW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_32 , FN_SL , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ANDN -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ANDN, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ORN -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ORN , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), XNOR -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_XNOR, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), MAX -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_MAX , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), MAXU -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_MAXU, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), MIN -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_MIN , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), MINU -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_MINU, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ROL -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ROL , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ROR -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ROR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), RORI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ROR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CLZ -> List(Y, N, 
fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR,FN_UNARY, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CTZ -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR,FN_UNARY, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CPOP -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR,FN_UNARY, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ORC_B -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR,FN_UNARY, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SEXT_B -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR,FN_UNARY, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), SEXT_H -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR,FN_UNARY, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ZEXT_H -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR,FN_UNARY, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), REV8 -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR,FN_UNARY, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ROLW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_32 , FN_ROL , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), RORW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_32 , FN_ROR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), RORIW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_32 , FN_ROR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CLZW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_32 ,FN_UNARY, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CTZW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_32 ,FN_UNARY, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CPOPW -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_32 ,FN_UNARY, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BCLR -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ANDN, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BCLRI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_ANDN, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BINV -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_XOR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BINVI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_XOR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BSET -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_OR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BSETI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_OR , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BEXT -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_FIX, N, IS_N ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_BEXT, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), BEXTI -> List(Y, N, fc2oh(FC_ALU) , RT_FIX, RT_FIX, RT_X , N, IS_I ,N, N, N, M_X , N, N, CSR.N, DW_XPR, FN_BEXT, X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), ) def RoCC_table: Seq[(BitPat, List[BitPat])] = Seq( // Note: We use fc2oh(FC_CSR) since CSR instructions cannot co-execute with RoCC instructions CUSTOM0 -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_X , RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), 
CUSTOM0_RS1 -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_FIX, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM0_RS1_RS2 -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM0_RD -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_X , RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM0_RD_RS1 -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_FIX, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM0_RD_RS1_RS2 -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM1 -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_X , RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM1_RS1 -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_FIX, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM1_RS1_RS2 -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM1_RD -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_X , RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM1_RD_RS1 -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_FIX, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM1_RD_RS1_RS2 -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM2 -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_X , RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM2_RS1 -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_FIX, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM2_RS1_RS2 -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM2_RD -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_X , RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM2_RD_RS1 -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_FIX, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM2_RD_RS1_RS2 -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM3 -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_X , RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM3_RS1 -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_FIX, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM3_RS1_RS2 -> List(Y, N, fc2oh(FC_CSR) , RT_X , RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM3_RD -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_X , RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM3_RD_RS1 -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_FIX, RT_X , N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X), CUSTOM3_RD_RS1_RS2 -> List(Y, N, fc2oh(FC_CSR) , RT_FIX, RT_FIX, RT_FIX, N, IS_N, N, N, N, M_X , N, N, CSR.N, DW_X , FN_X , X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X) ) } // scalastyle:on /** * 
Decoded control signals */ class CtrlSigs(implicit p: Parameters) extends Bundle { val legal = Bool() val fp_val = Bool() val fu_code = UInt(FC_SZ.W) val dst_type = UInt(2.W) val rs1_type = UInt(2.W) val rs2_type = UInt(2.W) val frs3_en = Bool() val imm_sel = UInt(IS_N.getWidth.W) val uses_ldq = Bool() val uses_stq = Bool() val is_amo = Bool() val mem_cmd = UInt(freechips.rocketchip.rocket.M_SZ.W) val inst_unique = Bool() val flush_on_commit = Bool() val csr_cmd = UInt(freechips.rocketchip.rocket.CSR.SZ.W) val fcn_dw = Bool() val fcn_op = UInt(SZ_ALU_FN.W) val fp = new freechips.rocketchip.tile.FPUCtrlSigs() def decode(inst: UInt, table: Iterable[(BitPat, List[BitPat])]) = { val decoder = freechips.rocketchip.rocket.DecodeLogic(inst, DecodeTables.decode_default, table) val sigs = Seq( legal, fp_val, fu_code, dst_type, rs1_type, rs2_type, frs3_en, imm_sel, uses_ldq, uses_stq, is_amo, mem_cmd, inst_unique, flush_on_commit, csr_cmd, fcn_dw, fcn_op, fp.ldst, fp.wen, fp.ren1, fp.ren2, fp.ren3, fp.swap12, fp.swap23, fp.typeTagIn, fp.typeTagOut, fp.fromint, fp.toint, fp.fastpipe, fp.fma, fp.div, fp.sqrt, fp.wflags ) sigs zip decoder map {case(s,d) => s := d} fp.vec := false.B this } } /** * IO bundle for the Decode unit */ class DecodeUnitIo(implicit p: Parameters) extends BoomBundle { val enq = new Bundle { val uop = Input(new MicroOp()) } val deq = new Bundle { val uop = Output(new MicroOp()) } // from CSRFile val status = Input(new freechips.rocketchip.rocket.MStatus()) val csr_decode = Flipped(new freechips.rocketchip.rocket.CSRDecodeIO) val fcsr_rm = Input(UInt(FPConstants.RM_SZ.W)) val interrupt = Input(Bool()) val interrupt_cause = Input(UInt(xLen.W)) } /** * Decode unit that takes in a single instruction and generates a MicroOp. */ class DecodeUnit(implicit p: Parameters) extends BoomModule with freechips.rocketchip.rocket.constants.MemoryOpConstants with freechips.rocketchip.rocket.constants.ScalarOpConstants { val io = IO(new DecodeUnitIo) val uop = Wire(new MicroOp()) uop := io.enq.uop val decode_table = ( DecodeTables.X_table ++ DecodeTables.F_table ++ DecodeTables.FDivSqrt_table ++ DecodeTables.X64_table ++ DecodeTables.B_table ++ (if (usingRoCC) DecodeTables.RoCC_table else Nil) ) val inst = uop.inst val LDST = inst(RD_MSB,RD_LSB) val LRS1 = inst(RS1_MSB,RS1_LSB) val LRS2 = inst(RS2_MSB,RS2_LSB) val LRS3 = inst(RS3_MSB,RS3_LSB) val cs = Wire(new CtrlSigs()).decode(inst, decode_table) // Exception Handling io.csr_decode.inst := inst val csr_en = cs.csr_cmd.isOneOf(CSR.S, CSR.C, CSR.W) val csr_ren = cs.csr_cmd.isOneOf(CSR.S, CSR.C) && uop.lrs1 === 0.U val system_insn = cs.csr_cmd === CSR.I val sfence = inst === SFENCE_VMA val cs_legal = cs.legal // dontTouch(cs_legal) require (fLen >= 64) val illegal_rm = inst(14,12).isOneOf(5.U,6.U) || (inst(14,12) === 7.U && io.fcsr_rm >= 5.U) val id_illegal_insn = (!cs_legal || (cs.fp_val && (io.csr_decode.fp_illegal || illegal_rm)) || (uop.is_rocc && io.csr_decode.rocc_illegal) || (cs.is_amo && !io.status.isa('a'-'a')) || (csr_en && (io.csr_decode.read_illegal || !csr_ren && io.csr_decode.write_illegal)) || ((sfence || system_insn) && io.csr_decode.system_illegal)) // cs.div && !csr.io.status.isa('m'-'a') || TODO check for illegal div instructions def checkExceptions(x: Seq[(Bool, UInt)]) = (x.map(_._1).reduce(_||_), PriorityMux(x)) val (xcpt_valid, xcpt_cause) = checkExceptions(List( (io.interrupt && !io.enq.uop.is_sfb, io.interrupt_cause), // Disallow interrupts while we are handling a SFB (uop.bp_debug_if, (CSR.debugTriggerCause).U), 
(uop.bp_xcpt_if, (Causes.breakpoint).U), (uop.xcpt_pf_if, (Causes.fetch_page_fault).U), (uop.xcpt_ae_if, (Causes.fetch_access).U), (id_illegal_insn, (Causes.illegal_instruction).U))) uop.exception := xcpt_valid uop.exc_cause := xcpt_cause //------------------------------------------------------------- uop.is_mov := inst === ADD && LRS1 === 0.U uop.iq_type(IQ_UNQ) := Seq(FC_MUL , FC_DIV, FC_CSR, FC_I2F).map { c => cs.fu_code(c) }.reduce(_||_) uop.iq_type(IQ_ALU) := Seq(FC_ALU ).map { c => cs.fu_code(c) }.reduce(_||_) uop.iq_type(IQ_MEM) := Seq(FC_AGEN, FC_DGEN ).map { c => cs.fu_code(c) }.reduce(_||_) uop.iq_type(IQ_FP ) := Seq(FC_FPU , FC_FDV, FC_F2I ).map { c => cs.fu_code(c) }.reduce(_||_) uop.fu_code := cs.fu_code.asBools uop.ldst := LDST uop.lrs1 := LRS1 uop.lrs2 := LRS2 uop.lrs3 := LRS3 uop.dst_rtype := cs.dst_type uop.lrs1_rtype := Mux(cs.rs1_type === RT_FIX && LRS1 === 0.U, RT_ZERO, cs.rs1_type) uop.lrs2_rtype := Mux(cs.rs2_type === RT_FIX && LRS2 === 0.U, RT_ZERO, cs.rs2_type) uop.frs3_en := cs.frs3_en uop.ldst_is_rs1 := uop.is_sfb_shadow // SFB optimization when (uop.is_sfb_shadow && cs.rs2_type === RT_X) { uop.lrs2_rtype := Mux(LDST === 0.U, RT_ZERO, RT_FIX) uop.lrs2 := LDST uop.ldst_is_rs1 := false.B } .elsewhen (uop.is_sfb_shadow && uop.is_mov) { uop.lrs1 := LDST uop.lrs1_rtype := Mux(LDST === 0.U, RT_ZERO, RT_FIX) uop.ldst_is_rs1 := true.B } uop.fp_val := cs.fp_val uop.fp_ctrl := cs.fp uop.mem_cmd := cs.mem_cmd uop.mem_size := Mux(cs.mem_cmd.isOneOf(M_SFENCE, M_FLUSH_ALL), Cat(LRS2 =/= 0.U, LRS1 =/= 0.U), inst(13,12)) uop.mem_signed := !inst(14) uop.uses_ldq := cs.uses_ldq uop.uses_stq := cs.uses_stq uop.is_amo := cs.is_amo uop.is_fence := inst === FENCE uop.is_fencei := inst === FENCE_I uop.is_sfence := inst === SFENCE_VMA uop.is_sys_pc2epc := inst === EBREAK || inst === ECALL uop.is_eret := inst === ECALL || inst === EBREAK || inst === SRET || inst === MRET || inst === DRET uop.is_unique := cs.inst_unique uop.is_rocc := inst(6,0).isOneOf("b0001011".U, "b0101011".U, "b1111011".U) && inst(14,12).isOneOf(0.U, 2.U, 3.U, 4.U, 6.U, 7.U) uop.flush_on_commit := cs.flush_on_commit || (csr_en && !csr_ren && io.csr_decode.write_flush) //------------------------------------------------------------- // immediates // repackage the immediate, and then pass the fewest number of bits around val di24_20 = Mux(cs.imm_sel === IS_B || cs.imm_sel === IS_S, inst(11,7), inst(24,20)) val imm_packed = Cat(inst(31,25), di24_20, inst(19,12)) val imm = ImmGen(imm_packed, cs.imm_sel) val imm_hi = imm >> (immPregSz-1) val imm_lo = imm(immPregSz-1, 0) val short_imm = imm_hi === 0.U || ~imm_hi === 0.U || cs.imm_sel === IS_F3 uop.imm_rename := cs.imm_sel =/= IS_N && cs.imm_sel =/= IS_F3 uop.imm_packed := imm_packed uop.imm_sel := cs.imm_sel when (short_imm) { uop.imm_rename := false.B uop.imm_sel := IS_SH uop.pimm := Mux(cs.imm_sel === IS_F3, inst(14,12), imm_lo) } uop.fp_rm := Mux(inst(14,12) === 7.U, io.fcsr_rm, inst(14,12)) uop.fp_typ := inst(21,20) //------------------------------------------------------------- uop.csr_cmd := cs.csr_cmd when ((cs.csr_cmd === CSR.S || cs.csr_cmd === CSR.C) && LRS1 === 0.U) { uop.csr_cmd := CSR.R } uop.fcn_dw := cs.fcn_dw uop.fcn_op := cs.fcn_op uop.op1_sel := OP1_RS1 when (inst === LUI || inst === CSRRWI || inst === CSRRSI || inst === CSRRCI || inst === WFI || inst === SRET || inst === MRET || inst === DRET) { uop.op1_sel := OP1_ZERO } .elsewhen (inst === JAL || inst === JALR || inst === AUIPC) { uop.op1_sel := OP1_PC } .elsewhen (Seq(SH1ADD, SH2ADD, SH3ADD, SH1ADD_UW, 
SH2ADD_UW, SH3ADD_UW, ADD_UW, SLLI_UW).map(_ === inst).orR) { uop.op1_sel := OP1_RS1SHL } uop.op2_sel := OP2_RS2 when (cs.is_amo || inst === CSRRW || inst === CSRRS || inst === CSRRC) { uop.op2_sel := OP2_ZERO } .elsewhen (inst === CSRRWI || inst === CSRRSI || inst === CSRRCI || inst === WFI || inst === SRET || inst === DRET || inst === MRET) { uop.op2_sel := OP2_IMMC } .elsewhen (inst === JAL || inst === JALR) { uop.op2_sel := OP2_NEXT } .elsewhen (Seq(BCLR, BCLRI, BINV, BINVI, BSET, BSETI).map(_ === inst).orR) { uop.op2_sel := Mux(uop.lrs2_rtype === RT_FIX, OP2_RS2OH, OP2_IMMOH) } .elsewhen (cs.imm_sel === IS_U || cs.imm_sel === IS_I || cs.imm_sel === IS_S) { uop.op2_sel := OP2_IMM } uop.br_type := Seq( (BEQ , B_EQ ), (BNE , B_NE ), (BGE , B_GE ), (BGEU , B_GEU), (BLT , B_LT ), (BLTU , B_LTU), (JAL , B_J ), (JALR , B_JR ) ) .map { case (c, b) => Mux(inst === c, b, 0.U) } .reduce(_|_) io.deq.uop := uop } /** * Smaller Decode unit for the Frontend to decode different * branches. * Accepts EXPANDED RVC instructions */ class BranchDecodeSignals(implicit p: Parameters) extends BoomBundle { val is_ret = Bool() val is_call = Bool() val target = UInt(vaddrBitsExtended.W) val cfi_type = UInt(CFI_SZ.W) // Is this branch a short forwards jump? val sfb_offset = Valid(UInt(log2Ceil(icBlockBytes).W)) // Is this instruction allowed to be inside a sfb? val shadowable = Bool() } class BranchDecode(implicit p: Parameters) extends BoomModule { val io = IO(new Bundle { val inst = Input(UInt(32.W)) val pc = Input(UInt(vaddrBitsExtended.W)) val out = Output(new BranchDecodeSignals) }) val bpd_csignals = freechips.rocketchip.rocket.DecodeLogic(io.inst, List[BitPat](N, N, N, N, X), //// is br? //// | is jal? //// | | is jalr? //// | | | //// | | | shadowable //// | | | | has_rs2 //// | | | | | Seq[(BitPat, List[BitPat])]( JAL -> List(N, Y, N, N, X), JALR -> List(N, N, Y, N, X), BEQ -> List(Y, N, N, N, X), BNE -> List(Y, N, N, N, X), BGE -> List(Y, N, N, N, X), BGEU -> List(Y, N, N, N, X), BLT -> List(Y, N, N, N, X), BLTU -> List(Y, N, N, N, X), SLLI -> List(N, N, N, Y, N), SRLI -> List(N, N, N, Y, N), SRAI -> List(N, N, N, Y, N), ADDIW -> List(N, N, N, Y, N), SLLIW -> List(N, N, N, Y, N), SRAIW -> List(N, N, N, Y, N), SRLIW -> List(N, N, N, Y, N), ADDW -> List(N, N, N, Y, Y), SUBW -> List(N, N, N, Y, Y), SLLW -> List(N, N, N, Y, Y), SRAW -> List(N, N, N, Y, Y), SRLW -> List(N, N, N, Y, Y), LUI -> List(N, N, N, Y, N), ADDI -> List(N, N, N, Y, N), ANDI -> List(N, N, N, Y, N), ORI -> List(N, N, N, Y, N), XORI -> List(N, N, N, Y, N), SLTI -> List(N, N, N, Y, N), SLTIU -> List(N, N, N, Y, N), SLL -> List(N, N, N, Y, Y), ADD -> List(N, N, N, Y, Y), SUB -> List(N, N, N, Y, Y), SLT -> List(N, N, N, Y, Y), SLTU -> List(N, N, N, Y, Y), AND -> List(N, N, N, Y, Y), OR -> List(N, N, N, Y, Y), XOR -> List(N, N, N, Y, Y), SRA -> List(N, N, N, Y, Y), SRL -> List(N, N, N, Y, Y) )) val cs_is_br = bpd_csignals(0)(0) val cs_is_jal = bpd_csignals(1)(0) val cs_is_jalr = bpd_csignals(2)(0) val cs_is_shadowable = bpd_csignals(3)(0) val cs_has_rs2 = bpd_csignals(4)(0) io.out.is_call := (cs_is_jal || cs_is_jalr) && GetRd(io.inst) === RA io.out.is_ret := cs_is_jalr && GetRs1(io.inst) === BitPat("b00?01") && GetRd(io.inst) === X0 io.out.target := Mux(cs_is_br, ComputeBranchTarget(io.pc, io.inst, xLen), ComputeJALTarget(io.pc, io.inst, xLen)) io.out.cfi_type := Mux(cs_is_jalr, CFI_JALR, Mux(cs_is_jal, CFI_JAL, Mux(cs_is_br, CFI_BR, CFI_X))) val br_offset = Cat(io.inst(7), io.inst(30,25), io.inst(11,8), 0.U(1.W)) // Is a sfb if it points 
forwards (offset is positive) io.out.sfb_offset.valid := cs_is_br && !io.inst(31) && br_offset =/= 0.U && (br_offset >> log2Ceil(icBlockBytes)) === 0.U io.out.sfb_offset.bits := br_offset io.out.shadowable := cs_is_shadowable && ( !cs_has_rs2 || (GetRs1(io.inst) === GetRd(io.inst)) || (io.inst === ADD && GetRs1(io.inst) === X0) ) } /** * Track the current "branch mask", and give out the branch mask to each micro-op in Decode * (each micro-op in the machine has a branch mask which says which branches it * is being speculated under). * * @param pl_width pipeline width for the processor */ class BranchMaskGenerationLogic(val pl_width: Int)(implicit p: Parameters) extends BoomModule { val io = IO(new Bundle { // guess if the uop is a branch (we'll catch this later) val is_branch = Input(Vec(pl_width, Bool())) // lock in that it's actually a branch and will fire, so we update // the branch_masks. val will_fire = Input(Vec(pl_width, Bool())) // give out tag immediately (needed in rename) // mask can come later in the cycle val br_tag = Output(Vec(pl_width, UInt(brTagSz.W))) val br_mask = Output(Vec(pl_width, UInt(maxBrCount.W))) // tell decoders the branch mask has filled up, but on the granularity // of an individual micro-op (so some micro-ops can go through) val is_full = Output(Vec(pl_width, Bool())) val brupdate = Input(new BrUpdateInfo()) val flush_pipeline = Input(Bool()) val debug_branch_mask = Output(UInt(maxBrCount.W)) }) val branch_mask = RegInit(0.U(maxBrCount.W)) //------------------------------------------------------------- // Give out the branch tag to each branch micro-op var allocate_mask = branch_mask val tag_masks = Wire(Vec(pl_width, UInt(maxBrCount.W))) for (w <- 0 until pl_width) { // TODO this is a loss of performance as we're blocking branches based on potentially fake branches io.is_full(w) := (allocate_mask === ~(0.U(maxBrCount.W))) && io.is_branch(w) // find br_tag and compute next br_mask val new_br_tag = Wire(UInt(brTagSz.W)) new_br_tag := 0.U tag_masks(w) := 0.U for (i <- maxBrCount-1 to 0 by -1) { when (~allocate_mask(i)) { new_br_tag := i.U tag_masks(w) := (1.U << i.U) } } io.br_tag(w) := new_br_tag allocate_mask = Mux(io.is_branch(w), tag_masks(w) | allocate_mask, allocate_mask) } //------------------------------------------------------------- // Give out the branch mask to each micro-op // (kill off the bits that corresponded to branches that aren't going to fire) var curr_mask = branch_mask for (w <- 0 until pl_width) { io.br_mask(w) := GetNewBrMask(io.brupdate, curr_mask) curr_mask = Mux(io.will_fire(w), tag_masks(w) | curr_mask, curr_mask) } //------------------------------------------------------------- // Update the current branch_mask when (io.flush_pipeline) { branch_mask := 0.U } .otherwise { val mask = Mux(io.brupdate.b2.mispredict, io.brupdate.b2.uop.br_mask, ~(0.U(maxBrCount.W))) branch_mask := GetNewBrMask(io.brupdate, curr_mask) & mask } io.debug_branch_mask := branch_mask } File consts.scala: //****************************************************************************** // Copyright (c) 2011 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. 
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Constants //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v4.common.constants import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util.Str import freechips.rocketchip.rocket.RVCExpander /** * Mixin for scalar operation constants */ trait ScalarOpConstants { val X = BitPat("b?") val Y = BitPat("b1") val N = BitPat("b0") //************************************ // Extra Constants // Which branch predictor predicted us val BSRC_SZ = 3 val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred val BSRC_4 = 3.U(BSRC_SZ.W) // 4-cycle branch pred val BSRC_C = 4.U(BSRC_SZ.W) // core branch resolution //************************************ // Control Signals // CFI types val CFI_SZ = 3 val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction val CFI_BR = 1.U(CFI_SZ.W) // Branch val CFI_JAL = 2.U(CFI_SZ.W) // JAL val CFI_JALR = 3.U(CFI_SZ.W) // JALR // PC Select Signal val PC_PLUS4 = 0.U(2.W) // PC + 4 val PC_BRJMP = 1.U(2.W) // brjmp_target val PC_JALR = 2.U(2.W) // jump_reg_target // Branch Type val B_N = 0.U(4.W) // Next val B_NE = 1.U(4.W) // Branch on NotEqual val B_EQ = 2.U(4.W) // Branch on Equal val B_GE = 3.U(4.W) // Branch on Greater/Equal val B_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned val B_LT = 5.U(4.W) // Branch on Less Than val B_LTU = 6.U(4.W) // Branch on Less Than Unsigned val B_J = 7.U(4.W) // Jump val B_JR = 8.U(4.W) // Jump Register // RS1 Operand Select Signal val OP1_RS1 = 0.U(2.W) // Register Source #1 val OP1_ZERO= 1.U(2.W) val OP1_PC = 2.U(2.W) val OP1_RS1SHL = 3.U(2.W) val OP1_X = BitPat("b??") // RS2 Operand Select Signal val OP2_RS2 = 0.U(3.W) // Register Source #2 val OP2_IMM = 1.U(3.W) // immediate val OP2_ZERO= 2.U(3.W) // constant 0 val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4) val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1 val OP2_RS2OH = 5.U(3.W) val OP2_IMMOH = 6.U(3.W) val OP2_X = BitPat("b???") // Register File Write Enable Signal val REN_0 = false.B val REN_1 = true.B // Is 32b Word or 64b Doubldword? 
val SZ_DW = 1 val DW_X = true.B // Bool(xLen==64) val DW_32 = false.B val DW_64 = true.B val DW_XPR = true.B // Bool(xLen==64) // Memory Enable Signal val MEN_0 = false.B val MEN_1 = true.B val MEN_X = false.B // Immediate Extend Select val IS_I = 0.U(3.W) // I-Type (LD,ALU) val IS_S = 1.U(3.W) // S-Type (ST) val IS_B = 2.U(3.W) // SB-Type (BR) val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC) val IS_J = 4.U(3.W) // UJ-Type (J/JAL) val IS_SH = 5.U(3.W) // short-type (sign extend from pimm to get imm) val IS_N = 6.U(3.W) // No immediate (zeros immediate) val IS_F3 = 7.U(3.W) // funct3 // Decode Stage Control Signals val RT_FIX = 0.U(2.W) val RT_FLT = 1.U(2.W) val RT_X = 2.U(2.W) // not-a-register (prs1 = lrs1 special case) val RT_ZERO = 3.U(2.W) // IQT type val IQ_SZ = 4 val IQ_MEM = 0 val IQ_UNQ = 1 val IQ_ALU = 2 val IQ_FP = 3 // Functional unit select // bit mask, since a given execution pipeline may support multiple functional units val FC_SZ = 10 val FC_ALU = 0 val FC_AGEN = 1 val FC_DGEN = 2 val FC_MUL = 3 val FC_DIV = 4 val FC_CSR = 5 val FC_FPU = 6 val FC_FDV = 7 val FC_I2F = 8 val FC_F2I = 9 def NullMicroOp(implicit p: Parameters) = 0.U.asTypeOf(new boom.v4.common.MicroOp) } /** * Mixin for RISCV constants */ trait RISCVConstants { // abstract out instruction decode magic numbers val RD_MSB = 11 val RD_LSB = 7 val RS1_MSB = 19 val RS1_LSB = 15 val RS2_MSB = 24 val RS2_LSB = 20 val RS3_MSB = 31 val RS3_LSB = 27 val CSR_ADDR_MSB = 31 val CSR_ADDR_LSB = 20 val CSR_ADDR_SZ = 12 // location of the fifth bit in the shamt (for checking for illegal ops for SRAIW,etc.) val SHAMT_5_BIT = 25 val LONGEST_IMM_SZ = 20 val X0 = 0.U val RA = 1.U // return address register // memory consistency model // The C/C++ atomics MCM requires that two loads to the same address maintain program order. // The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior). val MCM_ORDER_DEPENDENT_LOADS = true val jal_opc = (0x6f).U val jalr_opc = (0x67).U def GetUop(inst: UInt): UInt = inst(6,0) def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB) def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB) def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = { val rvc_exp = Module(new RVCExpander) rvc_exp.io.in := inst Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst) } // Note: Accepts only EXPANDED rvc instructions def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) ((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) ((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = { val bdecode = Module(new boom.v4.exu.BranchDecode) bdecode.io.inst := inst bdecode.io.pc := 0.U bdecode.io.out.cfi_type } } /** * Mixin for exception cause constants */ trait ExcCauseConstants { // a memory disambigious misspeculation occurred val MINI_EXCEPTION_MEM_ORDERING = 16.U val MINI_EXCEPTION_CSR_REPLAY = 17.U require (!freechips.rocketchip.rocket.Causes.all.contains(16)) require (!freechips.rocketchip.rocket.Causes.all.contains(17)) }
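Before the generated Verilog below, here is a minimal, self-contained plain-Scala sketch (not part of the BOOM sources above, and not Chisel) that mirrors the SB-type immediate reconstruction used by ComputeBranchTarget and by BranchDecode's br_offset/sfb_offset logic. The bImm helper and the hand-assembled BEQ encoding are purely illustrative assumptions for cross-checking the bit slicing by hand.

object BImmCheck extends App {
  // Rebuild the SB-type immediate: imm[12|10:5] = inst[31|30:25],
  // imm[4:1|11] = inst[11:8|7], imm[0] = 0, then sign-extend from bit 12.
  def bImm(inst: Long): Long = {
    val imm = (((inst >> 31) & 0x1)  << 12) |
              (((inst >> 7)  & 0x1)  << 11) |
              (((inst >> 25) & 0x3f) << 5)  |
              (((inst >> 8)  & 0xf)  << 1)
    if ((imm & (1L << 12)) != 0L) imm | ~0x1fffL else imm
  }

  // Hand-assembled BEQ x1, x2, +16 (illustrative only):
  // imm[12|10:5]=0000000, rs2=00010, rs1=00001, funct3=000, imm[4:1|11]=10000, opcode=1100011
  val beq = java.lang.Long.parseLong("00000000001000001000100001100011", 2)
  assert(bImm(beq) == 16L)
  println(s"branch offset = ${bImm(beq)}") // prints 16
}

The same slicing appears in the emitted BranchDecode module below as br_offset = Cat(inst(7), inst(30,25), inst(11,8), 0.U(1.W)), with the sign handled separately via inst(31).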
module BranchDecode( // @[decode.scala:629:7] input clock, // @[decode.scala:629:7] input reset, // @[decode.scala:629:7] input [31:0] io_inst, // @[decode.scala:631:14] input [39:0] io_pc, // @[decode.scala:631:14] output io_out_is_ret, // @[decode.scala:631:14] output io_out_is_call, // @[decode.scala:631:14] output [39:0] io_out_target, // @[decode.scala:631:14] output [2:0] io_out_cfi_type, // @[decode.scala:631:14] output io_out_sfb_offset_valid, // @[decode.scala:631:14] output [5:0] io_out_sfb_offset_bits, // @[decode.scala:631:14] output io_out_shadowable // @[decode.scala:631:14] ); wire [31:0] io_inst_0 = io_inst; // @[decode.scala:629:7] wire [39:0] io_pc_0 = io_pc; // @[decode.scala:629:7] wire [31:0] bpd_csignals_decoded_plaInput = io_inst_0; // @[pla.scala:77:22] wire _io_out_is_ret_T_6; // @[decode.scala:701:72] wire [39:0] _io_out_target_T = io_pc_0; // @[decode.scala:629:7] wire [39:0] _io_out_target_T_8 = io_pc_0; // @[decode.scala:629:7] wire _io_out_is_call_T_3; // @[decode.scala:700:47] wire [39:0] _io_out_target_T_16; // @[decode.scala:703:23] wire [2:0] _io_out_cfi_type_T_2; // @[decode.scala:706:8] wire _io_out_sfb_offset_valid_T_7; // @[decode.scala:716:76] wire _io_out_shadowable_T_11; // @[decode.scala:718:41] wire io_out_sfb_offset_valid_0; // @[decode.scala:629:7] wire [5:0] io_out_sfb_offset_bits_0; // @[decode.scala:629:7] wire io_out_is_ret_0; // @[decode.scala:629:7] wire io_out_is_call_0; // @[decode.scala:629:7] wire [39:0] io_out_target_0; // @[decode.scala:629:7] wire [2:0] io_out_cfi_type_0; // @[decode.scala:629:7] wire io_out_shadowable_0; // @[decode.scala:629:7] wire [31:0] bpd_csignals_decoded_invInputs = ~bpd_csignals_decoded_plaInput; // @[pla.scala:77:22, :78:21] wire [4:0] bpd_csignals_decoded_invMatrixOutputs; // @[pla.scala:120:37] wire [4:0] bpd_csignals_decoded; // @[pla.scala:81:23] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_1 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_2 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_3 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_4 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_5 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_6 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_7 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_8 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_9 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_10 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_11 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_12 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_13 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_14 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_15 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_1 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_2 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_3 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_4 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_5 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_6 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_7 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_8 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_9 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_10 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_11 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_12 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_13 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_14 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_15 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_1 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_2 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_3 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_4 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_6 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_9 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_10 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_11 = 
bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_12 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_13 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_14 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_15 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_3 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_5 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_6 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_7 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_9 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_11 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_12 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_13 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_1 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_2 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_3 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_4 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_5 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_9 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_10 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_11 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_13 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_14 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_15 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_1 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_9 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_11 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_13 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_1 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_2 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_3 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_4 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_5 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_9 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_10 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_11 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_13 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_14 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_15 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7 = bpd_csignals_decoded_invInputs[12]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_1 = bpd_csignals_decoded_invInputs[12]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_2 = bpd_csignals_decoded_invInputs[12]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_6 = bpd_csignals_decoded_invInputs[12]; // @[pla.scala:78:21, :91:29] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo = {bpd_csignals_decoded_andMatrixOutputs_lo_hi, bpd_csignals_decoded_andMatrixOutputs_lo_lo}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi = {bpd_csignals_decoded_andMatrixOutputs_hi_hi, 
bpd_csignals_decoded_andMatrixOutputs_hi_lo}; // @[pla.scala:98:53] wire [7:0] _bpd_csignals_decoded_andMatrixOutputs_T = {bpd_csignals_decoded_andMatrixOutputs_hi, bpd_csignals_decoded_andMatrixOutputs_lo}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_5_2 = &_bpd_csignals_decoded_andMatrixOutputs_T; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_1 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_2 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_4 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_5 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_4 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_5 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_8 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_7 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_12 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_13 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8 = bpd_csignals_decoded_invInputs[14]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_1 = bpd_csignals_decoded_invInputs[14]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_4 = bpd_csignals_decoded_invInputs[14]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_3 = bpd_csignals_decoded_invInputs[14]; // @[pla.scala:78:21, :91:29] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_1}; // @[pla.scala:91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_1, bpd_csignals_decoded_andMatrixOutputs_lo_lo_1}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_1}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_1}; // @[pla.scala:90:45, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_1}; // @[pla.scala:91:29, :98:53] wire [4:0] bpd_csignals_decoded_andMatrixOutputs_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_1, bpd_csignals_decoded_andMatrixOutputs_hi_lo_1}; // @[pla.scala:98:53] wire 
[8:0] _bpd_csignals_decoded_andMatrixOutputs_T_1 = {bpd_csignals_decoded_andMatrixOutputs_hi_1, bpd_csignals_decoded_andMatrixOutputs_lo_1}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_9_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_1; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_2 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_3 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_4 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_5 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_6 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_7 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_8 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_12 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_15 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_3 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_3 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_6 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_7 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_8 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_2 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_2 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_4 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_5 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_5 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_6 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_7 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_1 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_2 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_3 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_4 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_5 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_6 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_7 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_1 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_2 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_3 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_4 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_5 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_6 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_7 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_1 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_2 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_3 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_4 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_5 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_6 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_7 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_1 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_1 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_2 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_3 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_4 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_5 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_6 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9}; // @[pla.scala:91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_2, bpd_csignals_decoded_andMatrixOutputs_lo_lo_2}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_2}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_2}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_2}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_2}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_1, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_2, bpd_csignals_decoded_andMatrixOutputs_hi_lo_2}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_2 = {bpd_csignals_decoded_andMatrixOutputs_hi_2, bpd_csignals_decoded_andMatrixOutputs_lo_2}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_14_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_2; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_1 = bpd_csignals_decoded_invInputs[30]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_2 = bpd_csignals_decoded_invInputs[30]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_3 
= bpd_csignals_decoded_invInputs[30]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_4 = bpd_csignals_decoded_invInputs[30]; // @[pla.scala:78:21, :91:29] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_1}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_1}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_1}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_2}; // @[pla.scala:91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_1, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_1}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_3, bpd_csignals_decoded_andMatrixOutputs_lo_lo_3}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_3}; // @[pla.scala:90:45, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_3}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_3}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_3}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_2, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_1}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_3, bpd_csignals_decoded_andMatrixOutputs_hi_lo_3}; // @[pla.scala:98:53] wire [13:0] _bpd_csignals_decoded_andMatrixOutputs_T_3 = {bpd_csignals_decoded_andMatrixOutputs_hi_3, bpd_csignals_decoded_andMatrixOutputs_lo_3}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_0_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_3; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_2}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_1}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_2}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_2 = 
{bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_2}; // @[pla.scala:91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_2, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_2}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_4, bpd_csignals_decoded_andMatrixOutputs_lo_lo_4}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_4}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_4}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_2, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_1}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_4}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_4}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_3, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_2}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_4, bpd_csignals_decoded_andMatrixOutputs_hi_lo_4}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_4 = {bpd_csignals_decoded_andMatrixOutputs_hi_4, bpd_csignals_decoded_andMatrixOutputs_lo_4}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_2_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_4; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_5 = bpd_csignals_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_7 = bpd_csignals_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_8 = bpd_csignals_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_5}; // @[pla.scala:90:45, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_5}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_5}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_5}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_5, bpd_csignals_decoded_andMatrixOutputs_hi_lo_5}; // 
@[pla.scala:98:53] wire [6:0] _bpd_csignals_decoded_andMatrixOutputs_T_5 = {bpd_csignals_decoded_andMatrixOutputs_hi_5, bpd_csignals_decoded_andMatrixOutputs_lo_5}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_12_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_5; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_6 = bpd_csignals_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_7 = bpd_csignals_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_8 = bpd_csignals_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_12 = bpd_csignals_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_6 = bpd_csignals_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_7 = bpd_csignals_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_8 = bpd_csignals_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_12 = bpd_csignals_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_5}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_6}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_6, bpd_csignals_decoded_andMatrixOutputs_lo_lo_5}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_6}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_6}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_6, bpd_csignals_decoded_andMatrixOutputs_hi_lo_6}; // @[pla.scala:98:53] wire [7:0] _bpd_csignals_decoded_andMatrixOutputs_T_6 = {bpd_csignals_decoded_andMatrixOutputs_hi_6, bpd_csignals_decoded_andMatrixOutputs_lo_6}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_6_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_6; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_3}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_7}; // @[pla.scala:90:45, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_6}; // @[pla.scala:91:29, :98:53] wire [4:0] bpd_csignals_decoded_andMatrixOutputs_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_7, 
bpd_csignals_decoded_andMatrixOutputs_lo_lo_6}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_7}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_7}; // @[pla.scala:90:45, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_7}; // @[pla.scala:90:45, :98:53] wire [4:0] bpd_csignals_decoded_andMatrixOutputs_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_7, bpd_csignals_decoded_andMatrixOutputs_hi_lo_7}; // @[pla.scala:98:53] wire [9:0] _bpd_csignals_decoded_andMatrixOutputs_T_7 = {bpd_csignals_decoded_andMatrixOutputs_hi_7, bpd_csignals_decoded_andMatrixOutputs_lo_7}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_15_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_7; // @[pla.scala:98:{53,70}] wire _bpd_csignals_decoded_orMatrixOutputs_T_4 = bpd_csignals_decoded_andMatrixOutputs_15_2; // @[pla.scala:98:70, :114:36] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_8 = bpd_csignals_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_10 = bpd_csignals_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_14 = bpd_csignals_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_8}; // @[pla.scala:90:45, :91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_8 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_8}; // @[pla.scala:90:45, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_8}; // @[pla.scala:90:45, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_8}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_8, bpd_csignals_decoded_andMatrixOutputs_hi_lo_8}; // @[pla.scala:98:53] wire [6:0] _bpd_csignals_decoded_andMatrixOutputs_T_8 = {bpd_csignals_decoded_andMatrixOutputs_hi_8, bpd_csignals_decoded_andMatrixOutputs_lo_8}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_11_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_8; // @[pla.scala:98:{53,70}] wire _bpd_csignals_decoded_orMatrixOutputs_T_5 = bpd_csignals_decoded_andMatrixOutputs_11_2; // @[pla.scala:98:70, :114:36] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_7 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_10 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_11 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_14 = 
bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_15 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_3}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_2}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_3}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_4}; // @[pla.scala:91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_9 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_4, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_3}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_9 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_9, bpd_csignals_decoded_andMatrixOutputs_lo_lo_7}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_7}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_9}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_9 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_3, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_2}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_9}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_9}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_9 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_5, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_3}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_9 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_9, bpd_csignals_decoded_andMatrixOutputs_hi_lo_9}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_9 = {bpd_csignals_decoded_andMatrixOutputs_hi_9, bpd_csignals_decoded_andMatrixOutputs_lo_9}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_3_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_9; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_4}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_8 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_3}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_4 = 
{bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_4}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_5}; // @[pla.scala:91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_10 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_5, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_4}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_10 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_10, bpd_csignals_decoded_andMatrixOutputs_lo_lo_8}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_8}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_10}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_10 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_4, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_3}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_10}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_10}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_10 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_6, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_4}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_10 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_10, bpd_csignals_decoded_andMatrixOutputs_hi_lo_10}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_10 = {bpd_csignals_decoded_andMatrixOutputs_hi_10, bpd_csignals_decoded_andMatrixOutputs_lo_10}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_7_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_10; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_9 = bpd_csignals_decoded_plaInput[13]; // @[pla.scala:77:22, :90:45] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_9 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_11, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_9}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_11 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_11, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_11}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_11 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_11, bpd_csignals_decoded_andMatrixOutputs_lo_lo_9}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_11 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_11, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_11}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_11 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_11, 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_11}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_11 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_11, bpd_csignals_decoded_andMatrixOutputs_hi_lo_11}; // @[pla.scala:98:53] wire [7:0] _bpd_csignals_decoded_andMatrixOutputs_T_11 = {bpd_csignals_decoded_andMatrixOutputs_hi_11, bpd_csignals_decoded_andMatrixOutputs_lo_11}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_1_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_11; // @[pla.scala:98:{53,70}] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_10 = bpd_csignals_decoded_plaInput[14]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_6 = bpd_csignals_decoded_plaInput[14]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_8 = bpd_csignals_decoded_plaInput[14]; // @[pla.scala:77:22, :90:45] wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_9 = bpd_csignals_decoded_plaInput[14]; // @[pla.scala:77:22, :90:45] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_10 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_10}; // @[pla.scala:90:45, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_12 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_12}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_12 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_12, bpd_csignals_decoded_andMatrixOutputs_lo_lo_10}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_12 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_12}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_12 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_12}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_12 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_12, bpd_csignals_decoded_andMatrixOutputs_hi_lo_12}; // @[pla.scala:98:53] wire [7:0] _bpd_csignals_decoded_andMatrixOutputs_T_12 = {bpd_csignals_decoded_andMatrixOutputs_hi_12, bpd_csignals_decoded_andMatrixOutputs_lo_12}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_13_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_12; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_5}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_11 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_4}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_5}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_6}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_13 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_6, 
bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_5}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_13 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_13, bpd_csignals_decoded_andMatrixOutputs_lo_lo_11}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_13, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_11}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_13, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_13}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_13 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_5, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_4}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_13, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_13}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_13, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_13}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_13 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_7, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_5}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_13 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_13, bpd_csignals_decoded_andMatrixOutputs_hi_lo_13}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_13 = {bpd_csignals_decoded_andMatrixOutputs_hi_13, bpd_csignals_decoded_andMatrixOutputs_lo_13}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_4_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_13; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_6}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_12 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_5}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_6}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_7}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_14 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_7, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_6}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_14 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_14, bpd_csignals_decoded_andMatrixOutputs_lo_lo_12}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_14, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_12}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_14, 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_14}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_14 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_6, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_5}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_14, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_14}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_14, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_14}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_14 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_8, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_6}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_14 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_14, bpd_csignals_decoded_andMatrixOutputs_hi_lo_14}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_14 = {bpd_csignals_decoded_andMatrixOutputs_hi_14, bpd_csignals_decoded_andMatrixOutputs_lo_14}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_8_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_14; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_7}; // @[pla.scala:91:29, :98:53] wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_13 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_6}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_7}; // @[pla.scala:91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_8}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_15 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_8, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_7}; // @[pla.scala:98:53] wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_15 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_15, bpd_csignals_decoded_andMatrixOutputs_lo_lo_13}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_15, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_13}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_15, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_15}; // @[pla.scala:90:45, :91:29, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_15 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_7, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_6}; // @[pla.scala:98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_15, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_15}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_9 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_15, 
bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_15}; // @[pla.scala:90:45, :98:53] wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_15 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_9, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_7}; // @[pla.scala:98:53] wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_15 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_15, bpd_csignals_decoded_andMatrixOutputs_hi_lo_15}; // @[pla.scala:98:53] wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_15 = {bpd_csignals_decoded_andMatrixOutputs_hi_15, bpd_csignals_decoded_andMatrixOutputs_lo_15}; // @[pla.scala:98:53] wire bpd_csignals_decoded_andMatrixOutputs_10_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_15; // @[pla.scala:98:{53,70}] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_lo = {bpd_csignals_decoded_andMatrixOutputs_2_2, bpd_csignals_decoded_andMatrixOutputs_10_2}; // @[pla.scala:98:70, :114:19] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_hi = {bpd_csignals_decoded_andMatrixOutputs_14_2, bpd_csignals_decoded_andMatrixOutputs_0_2}; // @[pla.scala:98:70, :114:19] wire [3:0] _bpd_csignals_decoded_orMatrixOutputs_T = {bpd_csignals_decoded_orMatrixOutputs_hi, bpd_csignals_decoded_orMatrixOutputs_lo}; // @[pla.scala:114:19] wire _bpd_csignals_decoded_orMatrixOutputs_T_1 = |_bpd_csignals_decoded_orMatrixOutputs_T; // @[pla.scala:114:{19,36}] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_lo_lo = {bpd_csignals_decoded_andMatrixOutputs_8_2, bpd_csignals_decoded_andMatrixOutputs_10_2}; // @[pla.scala:98:70, :114:19] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_lo_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_7_2, bpd_csignals_decoded_andMatrixOutputs_1_2}; // @[pla.scala:98:70, :114:19] wire [2:0] bpd_csignals_decoded_orMatrixOutputs_lo_hi = {bpd_csignals_decoded_orMatrixOutputs_lo_hi_hi, bpd_csignals_decoded_andMatrixOutputs_4_2}; // @[pla.scala:98:70, :114:19] wire [4:0] bpd_csignals_decoded_orMatrixOutputs_lo_1 = {bpd_csignals_decoded_orMatrixOutputs_lo_hi, bpd_csignals_decoded_orMatrixOutputs_lo_lo}; // @[pla.scala:114:19] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_hi_lo_hi = {bpd_csignals_decoded_andMatrixOutputs_0_2, bpd_csignals_decoded_andMatrixOutputs_12_2}; // @[pla.scala:98:70, :114:19] wire [2:0] bpd_csignals_decoded_orMatrixOutputs_hi_lo = {bpd_csignals_decoded_orMatrixOutputs_hi_lo_hi, bpd_csignals_decoded_andMatrixOutputs_3_2}; // @[pla.scala:98:70, :114:19] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_hi_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_5_2, bpd_csignals_decoded_andMatrixOutputs_9_2}; // @[pla.scala:98:70, :114:19] wire [2:0] bpd_csignals_decoded_orMatrixOutputs_hi_hi = {bpd_csignals_decoded_orMatrixOutputs_hi_hi_hi, bpd_csignals_decoded_andMatrixOutputs_14_2}; // @[pla.scala:98:70, :114:19] wire [5:0] bpd_csignals_decoded_orMatrixOutputs_hi_1 = {bpd_csignals_decoded_orMatrixOutputs_hi_hi, bpd_csignals_decoded_orMatrixOutputs_hi_lo}; // @[pla.scala:114:19] wire [10:0] _bpd_csignals_decoded_orMatrixOutputs_T_2 = {bpd_csignals_decoded_orMatrixOutputs_hi_1, bpd_csignals_decoded_orMatrixOutputs_lo_1}; // @[pla.scala:114:19] wire _bpd_csignals_decoded_orMatrixOutputs_T_3 = |_bpd_csignals_decoded_orMatrixOutputs_T_2; // @[pla.scala:114:{19,36}] wire [1:0] _bpd_csignals_decoded_orMatrixOutputs_T_6 = {bpd_csignals_decoded_andMatrixOutputs_6_2, bpd_csignals_decoded_andMatrixOutputs_13_2}; // @[pla.scala:98:70, :114:19] wire _bpd_csignals_decoded_orMatrixOutputs_T_7 = |_bpd_csignals_decoded_orMatrixOutputs_T_6; // @[pla.scala:114:{19,36}] wire [1:0] 
bpd_csignals_decoded_orMatrixOutputs_lo_2 = {_bpd_csignals_decoded_orMatrixOutputs_T_3, _bpd_csignals_decoded_orMatrixOutputs_T_1}; // @[pla.scala:102:36, :114:36] wire [1:0] bpd_csignals_decoded_orMatrixOutputs_hi_hi_1 = {_bpd_csignals_decoded_orMatrixOutputs_T_7, _bpd_csignals_decoded_orMatrixOutputs_T_5}; // @[pla.scala:102:36, :114:36] wire [2:0] bpd_csignals_decoded_orMatrixOutputs_hi_2 = {bpd_csignals_decoded_orMatrixOutputs_hi_hi_1, _bpd_csignals_decoded_orMatrixOutputs_T_4}; // @[pla.scala:102:36, :114:36] wire [4:0] bpd_csignals_decoded_orMatrixOutputs = {bpd_csignals_decoded_orMatrixOutputs_hi_2, bpd_csignals_decoded_orMatrixOutputs_lo_2}; // @[pla.scala:102:36] wire _bpd_csignals_decoded_invMatrixOutputs_T = bpd_csignals_decoded_orMatrixOutputs[0]; // @[pla.scala:102:36, :124:31] wire _bpd_csignals_decoded_invMatrixOutputs_T_1 = bpd_csignals_decoded_orMatrixOutputs[1]; // @[pla.scala:102:36, :124:31] wire _bpd_csignals_decoded_invMatrixOutputs_T_2 = bpd_csignals_decoded_orMatrixOutputs[2]; // @[pla.scala:102:36, :124:31] wire _bpd_csignals_decoded_invMatrixOutputs_T_3 = bpd_csignals_decoded_orMatrixOutputs[3]; // @[pla.scala:102:36, :124:31] wire _bpd_csignals_decoded_invMatrixOutputs_T_4 = bpd_csignals_decoded_orMatrixOutputs[4]; // @[pla.scala:102:36, :124:31] wire [1:0] bpd_csignals_decoded_invMatrixOutputs_lo = {_bpd_csignals_decoded_invMatrixOutputs_T_1, _bpd_csignals_decoded_invMatrixOutputs_T}; // @[pla.scala:120:37, :124:31] wire [1:0] bpd_csignals_decoded_invMatrixOutputs_hi_hi = {_bpd_csignals_decoded_invMatrixOutputs_T_4, _bpd_csignals_decoded_invMatrixOutputs_T_3}; // @[pla.scala:120:37, :124:31] wire [2:0] bpd_csignals_decoded_invMatrixOutputs_hi = {bpd_csignals_decoded_invMatrixOutputs_hi_hi, _bpd_csignals_decoded_invMatrixOutputs_T_2}; // @[pla.scala:120:37, :124:31] assign bpd_csignals_decoded_invMatrixOutputs = {bpd_csignals_decoded_invMatrixOutputs_hi, bpd_csignals_decoded_invMatrixOutputs_lo}; // @[pla.scala:120:37] assign bpd_csignals_decoded = bpd_csignals_decoded_invMatrixOutputs; // @[pla.scala:81:23, :120:37] wire bpd_csignals_0 = bpd_csignals_decoded[4]; // @[pla.scala:81:23] wire cs_is_br = bpd_csignals_0; // @[Decode.scala:50:77] wire bpd_csignals_1 = bpd_csignals_decoded[3]; // @[pla.scala:81:23] wire cs_is_jal = bpd_csignals_1; // @[Decode.scala:50:77] wire bpd_csignals_2 = bpd_csignals_decoded[2]; // @[pla.scala:81:23] wire cs_is_jalr = bpd_csignals_2; // @[Decode.scala:50:77] wire bpd_csignals_3 = bpd_csignals_decoded[1]; // @[pla.scala:81:23] wire cs_is_shadowable = bpd_csignals_3; // @[Decode.scala:50:77] wire bpd_csignals_4 = bpd_csignals_decoded[0]; // @[pla.scala:81:23] wire cs_has_rs2 = bpd_csignals_4; // @[Decode.scala:50:77] wire _io_out_is_call_T = cs_is_jal | cs_is_jalr; // @[decode.scala:695:34, :696:35, :700:32] wire [4:0] _io_out_is_call_T_1 = io_inst_0[11:7]; // @[decode.scala:629:7] wire [4:0] _io_out_is_ret_T_4 = io_inst_0[11:7]; // @[decode.scala:629:7] wire [4:0] _io_out_shadowable_T_2 = io_inst_0[11:7]; // @[decode.scala:629:7] wire _io_out_is_call_T_2 = _io_out_is_call_T_1 == 5'h1; // @[decode.scala:700:65] assign _io_out_is_call_T_3 = _io_out_is_call_T & _io_out_is_call_T_2; // @[decode.scala:700:{32,47,65}] assign io_out_is_call_0 = _io_out_is_call_T_3; // @[decode.scala:629:7, :700:47] wire [4:0] _io_out_is_ret_T = io_inst_0[19:15]; // @[decode.scala:629:7] wire [4:0] _io_out_shadowable_T_1 = io_inst_0[19:15]; // @[decode.scala:629:7] wire [4:0] _io_out_shadowable_T_7 = io_inst_0[19:15]; // @[decode.scala:629:7] wire [4:0] 
_io_out_is_ret_T_1 = _io_out_is_ret_T & 5'h1B; // @[decode.scala:701:51] wire _io_out_is_ret_T_2 = _io_out_is_ret_T_1 == 5'h1; // @[decode.scala:701:51] wire _io_out_is_ret_T_3 = cs_is_jalr & _io_out_is_ret_T_2; // @[decode.scala:696:35, :701:{32,51}] wire _io_out_is_ret_T_5 = _io_out_is_ret_T_4 == 5'h0; // @[decode.scala:701:90] assign _io_out_is_ret_T_6 = _io_out_is_ret_T_3 & _io_out_is_ret_T_5; // @[decode.scala:701:{32,72,90}] assign io_out_is_ret_0 = _io_out_is_ret_T_6; // @[decode.scala:629:7, :701:72] wire _io_out_target_b_imm32_T = io_inst_0[31]; // @[decode.scala:629:7] wire _io_out_target_j_imm32_T = io_inst_0[31]; // @[decode.scala:629:7] wire _io_out_sfb_offset_valid_T = io_inst_0[31]; // @[decode.scala:629:7, :716:50] wire [19:0] _io_out_target_b_imm32_T_1 = {20{_io_out_target_b_imm32_T}}; // @[consts.scala:189:{27,35}] wire _io_out_target_b_imm32_T_2 = io_inst_0[7]; // @[decode.scala:629:7] wire _br_offset_T = io_inst_0[7]; // @[decode.scala:629:7, :714:30] wire [5:0] _io_out_target_b_imm32_T_3 = io_inst_0[30:25]; // @[decode.scala:629:7] wire [5:0] _io_out_target_j_imm32_T_4 = io_inst_0[30:25]; // @[decode.scala:629:7] wire [5:0] _br_offset_T_1 = io_inst_0[30:25]; // @[decode.scala:629:7, :714:42] wire [3:0] _io_out_target_b_imm32_T_4 = io_inst_0[11:8]; // @[decode.scala:629:7] wire [3:0] _br_offset_T_2 = io_inst_0[11:8]; // @[decode.scala:629:7, :714:58] wire [4:0] io_out_target_b_imm32_lo = {_io_out_target_b_imm32_T_4, 1'h0}; // @[consts.scala:189:{22,68}] wire [20:0] io_out_target_b_imm32_hi_hi = {_io_out_target_b_imm32_T_1, _io_out_target_b_imm32_T_2}; // @[consts.scala:189:{22,27,46}] wire [26:0] io_out_target_b_imm32_hi = {io_out_target_b_imm32_hi_hi, _io_out_target_b_imm32_T_3}; // @[consts.scala:189:{22,55}] wire [31:0] io_out_target_b_imm32 = {io_out_target_b_imm32_hi, io_out_target_b_imm32_lo}; // @[consts.scala:189:22] wire [31:0] _io_out_target_T_1 = io_out_target_b_imm32; // @[consts.scala:189:22, :190:27] wire [40:0] _io_out_target_T_2 = {_io_out_target_T[39], _io_out_target_T} + {{9{_io_out_target_T_1[31]}}, _io_out_target_T_1}; // @[consts.scala:190:{10,17,27}] wire [39:0] _io_out_target_T_3 = _io_out_target_T_2[39:0]; // @[consts.scala:190:17] wire [39:0] _io_out_target_T_4 = _io_out_target_T_3; // @[consts.scala:190:17] wire [39:0] _io_out_target_T_5 = _io_out_target_T_4 & 40'hFFFFFFFFFE; // @[consts.scala:190:{17,42}] wire [39:0] _io_out_target_T_6 = _io_out_target_T_5; // @[consts.scala:190:42] wire [39:0] _io_out_target_T_7 = _io_out_target_T_6; // @[consts.scala:190:{42,52}] wire [11:0] _io_out_target_j_imm32_T_1 = {12{_io_out_target_j_imm32_T}}; // @[consts.scala:195:{27,35}] wire [7:0] _io_out_target_j_imm32_T_2 = io_inst_0[19:12]; // @[decode.scala:629:7] wire _io_out_target_j_imm32_T_3 = io_inst_0[20]; // @[decode.scala:629:7] wire [3:0] _io_out_target_j_imm32_T_5 = io_inst_0[24:21]; // @[decode.scala:629:7] wire [9:0] io_out_target_j_imm32_lo_hi = {_io_out_target_j_imm32_T_4, _io_out_target_j_imm32_T_5}; // @[consts.scala:195:{22,69,82}] wire [10:0] io_out_target_j_imm32_lo = {io_out_target_j_imm32_lo_hi, 1'h0}; // @[consts.scala:195:22] wire [19:0] io_out_target_j_imm32_hi_hi = {_io_out_target_j_imm32_T_1, _io_out_target_j_imm32_T_2}; // @[consts.scala:195:{22,27,46}] wire [20:0] io_out_target_j_imm32_hi = {io_out_target_j_imm32_hi_hi, _io_out_target_j_imm32_T_3}; // @[consts.scala:195:{22,59}] wire [31:0] io_out_target_j_imm32 = {io_out_target_j_imm32_hi, io_out_target_j_imm32_lo}; // @[consts.scala:195:22] wire [31:0] _io_out_target_T_9 = 
io_out_target_j_imm32; // @[consts.scala:195:22, :196:27] wire [40:0] _io_out_target_T_10 = {_io_out_target_T_8[39], _io_out_target_T_8} + {{9{_io_out_target_T_9[31]}}, _io_out_target_T_9}; // @[consts.scala:196:{10,17,27}] wire [39:0] _io_out_target_T_11 = _io_out_target_T_10[39:0]; // @[consts.scala:196:17] wire [39:0] _io_out_target_T_12 = _io_out_target_T_11; // @[consts.scala:196:17] wire [39:0] _io_out_target_T_13 = _io_out_target_T_12 & 40'hFFFFFFFFFE; // @[consts.scala:196:{17,42}] wire [39:0] _io_out_target_T_14 = _io_out_target_T_13; // @[consts.scala:196:42] wire [39:0] _io_out_target_T_15 = _io_out_target_T_14; // @[consts.scala:196:{42,52}] assign _io_out_target_T_16 = cs_is_br ? _io_out_target_T_7 : _io_out_target_T_15; // @[decode.scala:694:33, :703:23] assign io_out_target_0 = _io_out_target_T_16; // @[decode.scala:629:7, :703:23] wire [2:0] _io_out_cfi_type_T = {2'h0, cs_is_br}; // @[decode.scala:694:33, :710:8] wire [2:0] _io_out_cfi_type_T_1 = cs_is_jal ? 3'h2 : _io_out_cfi_type_T; // @[decode.scala:695:34, :708:8, :710:8] assign _io_out_cfi_type_T_2 = cs_is_jalr ? 3'h3 : _io_out_cfi_type_T_1; // @[decode.scala:696:35, :706:8, :708:8] assign io_out_cfi_type_0 = _io_out_cfi_type_T_2; // @[decode.scala:629:7, :706:8] wire [4:0] br_offset_lo = {_br_offset_T_2, 1'h0}; // @[decode.scala:714:{22,58}] wire [6:0] br_offset_hi = {_br_offset_T, _br_offset_T_1}; // @[decode.scala:714:{22,30,42}] wire [11:0] br_offset = {br_offset_hi, br_offset_lo}; // @[decode.scala:714:22] wire _io_out_sfb_offset_valid_T_1 = ~_io_out_sfb_offset_valid_T; // @[decode.scala:716:{42,50}] wire _io_out_sfb_offset_valid_T_2 = cs_is_br & _io_out_sfb_offset_valid_T_1; // @[decode.scala:694:33, :716:{39,42}] wire _io_out_sfb_offset_valid_T_3 = |br_offset; // @[decode.scala:714:22, :716:68] wire _io_out_sfb_offset_valid_T_4 = _io_out_sfb_offset_valid_T_2 & _io_out_sfb_offset_valid_T_3; // @[decode.scala:716:{39,55,68}] wire [5:0] _io_out_sfb_offset_valid_T_5 = br_offset[11:6]; // @[decode.scala:714:22, :716:90] wire _io_out_sfb_offset_valid_T_6 = _io_out_sfb_offset_valid_T_5 == 6'h0; // @[decode.scala:716:{90,117}] assign _io_out_sfb_offset_valid_T_7 = _io_out_sfb_offset_valid_T_4 & _io_out_sfb_offset_valid_T_6; // @[decode.scala:716:{55,76,117}] assign io_out_sfb_offset_valid_0 = _io_out_sfb_offset_valid_T_7; // @[decode.scala:629:7, :716:76] assign io_out_sfb_offset_bits_0 = br_offset[5:0]; // @[decode.scala:629:7, :714:22, :717:27] wire _io_out_shadowable_T = ~cs_has_rs2; // @[decode.scala:698:35, :719:5] wire _io_out_shadowable_T_3 = _io_out_shadowable_T_1 == _io_out_shadowable_T_2; // @[decode.scala:720:22] wire _io_out_shadowable_T_4 = _io_out_shadowable_T | _io_out_shadowable_T_3; // @[decode.scala:719:{5,17}, :720:22] wire [31:0] _io_out_shadowable_T_5 = io_inst_0 & 32'hFE00707F; // @[decode.scala:629:7, :721:14] wire _io_out_shadowable_T_6 = _io_out_shadowable_T_5 == 32'h33; // @[decode.scala:721:14] wire _io_out_shadowable_T_8 = _io_out_shadowable_T_7 == 5'h0; // @[decode.scala:701:90, :721:41] wire _io_out_shadowable_T_9 = _io_out_shadowable_T_6 & _io_out_shadowable_T_8; // @[decode.scala:721:{14,22,41}] wire _io_out_shadowable_T_10 = _io_out_shadowable_T_4 | _io_out_shadowable_T_9; // @[decode.scala:719:17, :720:42, :721:22] assign _io_out_shadowable_T_11 = cs_is_shadowable & _io_out_shadowable_T_10; // @[decode.scala:697:41, :718:41, :720:42] assign io_out_shadowable_0 = _io_out_shadowable_T_11; // @[decode.scala:629:7, :718:41] assign io_out_is_ret = io_out_is_ret_0; // @[decode.scala:629:7] 
assign io_out_is_call = io_out_is_call_0; // @[decode.scala:629:7] assign io_out_target = io_out_target_0; // @[decode.scala:629:7] assign io_out_cfi_type = io_out_cfi_type_0; // @[decode.scala:629:7] assign io_out_sfb_offset_valid = io_out_sfb_offset_valid_0; // @[decode.scala:629:7] assign io_out_sfb_offset_bits = io_out_sfb_offset_bits_0; // @[decode.scala:629:7] assign io_out_shadowable = io_out_shadowable_0; // @[decode.scala:629:7] endmodule
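The tail of the module above collapses the PLA outputs into five control signals (cs_is_br, cs_is_jal, cs_is_jalr, cs_is_shadowable, cs_has_rs2) and then derives the call/return flags, the branch or jump target, the CFI type, and the short-forward-branch (SFB) offset. A minimal Chisel sketch of the CFI-type priority mux and the SFB-offset check is given below; the module name BranchKindSketch and its port names are invented for illustration, and only the 3-bit CFI encodings (0 none, 1 branch, 2 jal, 3 jalr) and the offset conditions are taken from the generated Verilog above.

import chisel3._

// Illustrative sketch only, not the original decode module.
class BranchKindSketch extends Module {
  val io = IO(new Bundle {
    val is_br            = Input(Bool())      // conditional branch
    val is_jal           = Input(Bool())      // direct jump
    val is_jalr          = Input(Bool())      // indirect jump
    val br_sign          = Input(Bool())      // inst(31), sign of the B-type immediate
    val br_offset        = Input(UInt(12.W))  // low 12 bits of the B-type branch offset
    val cfi_type         = Output(UInt(3.W))
    val sfb_offset_valid = Output(Bool())
    val sfb_offset_bits  = Output(UInt(6.W))
  })
  // Priority mux mirrored from the generated code: jalr (3) over jal (2) over branch (1) over none (0).
  io.cfi_type := Mux(io.is_jalr, 3.U, Mux(io.is_jal, 2.U, Mux(io.is_br, 1.U, 0.U)))
  // A short forward branch: a conditional branch whose immediate is non-negative,
  // non-zero, and fits in the low 6 bits of the offset.
  io.sfb_offset_valid := io.is_br && !io.br_sign && io.br_offset =/= 0.U && io.br_offset(11, 6) === 0.U
  io.sfb_offset_bits  := io.br_offset(5, 0)
}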
Generate the Verilog code corresponding to the following Chisel files. File FPU.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.tile import chisel3._ import chisel3.util._ import chisel3.{DontCare, WireInit, withClock, withReset} import chisel3.experimental.SourceInfo import chisel3.experimental.dataview._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.rocket._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property case class FPUParams( minFLen: Int = 32, fLen: Int = 64, divSqrt: Boolean = true, sfmaLatency: Int = 3, dfmaLatency: Int = 4, fpmuLatency: Int = 2, ifpuLatency: Int = 2 ) object FPConstants { val RM_SZ = 3 val FLAGS_SZ = 5 } trait HasFPUCtrlSigs { val ldst = Bool() val wen = Bool() val ren1 = Bool() val ren2 = Bool() val ren3 = Bool() val swap12 = Bool() val swap23 = Bool() val typeTagIn = UInt(2.W) val typeTagOut = UInt(2.W) val fromint = Bool() val toint = Bool() val fastpipe = Bool() val fma = Bool() val div = Bool() val sqrt = Bool() val wflags = Bool() val vec = Bool() } class FPUCtrlSigs extends Bundle with HasFPUCtrlSigs class FPUDecoder(implicit p: Parameters) extends FPUModule()(p) { val io = IO(new Bundle { val inst = Input(Bits(32.W)) val sigs = Output(new FPUCtrlSigs()) }) private val X2 = BitPat.dontCare(2) val default = List(X,X,X,X,X,X,X,X2,X2,X,X,X,X,X,X,X,N) val h: Array[(BitPat, List[BitPat])] = Array(FLH -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N), FSH -> List(Y,N,N,Y,N,Y,X, I, H,N,Y,N,N,N,N,N,N), FMV_H_X -> List(N,Y,N,N,N,X,X, H, I,Y,N,N,N,N,N,N,N), FCVT_H_W -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N), FCVT_H_WU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N), FCVT_H_L -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N), FCVT_H_LU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N), FMV_X_H -> List(N,N,Y,N,N,N,X, I, H,N,Y,N,N,N,N,N,N), FCLASS_H -> List(N,N,Y,N,N,N,X, H, H,N,Y,N,N,N,N,N,N), FCVT_W_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N), FCVT_WU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N), FCVT_L_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N), FCVT_LU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N), FCVT_S_H -> List(N,Y,Y,N,N,N,X, H, S,N,N,Y,N,N,N,Y,N), FCVT_H_S -> List(N,Y,Y,N,N,N,X, S, H,N,N,Y,N,N,N,Y,N), FEQ_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N), FLT_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N), FLE_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N), FSGNJ_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N), FSGNJN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N), FSGNJX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N), FMIN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N), FMAX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N), FADD_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N), FSUB_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N), FMUL_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,Y,N,N,Y,N), FMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N), FMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N), FNMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N), FNMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N), FDIV_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,N,Y,N,Y,N), FSQRT_H -> List(N,Y,Y,N,N,N,X, H, H,N,N,N,N,N,Y,Y,N)) val f: Array[(BitPat, List[BitPat])] = Array(FLW -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N), FSW -> List(Y,N,N,Y,N,Y,X, I, S,N,Y,N,N,N,N,N,N), FMV_W_X -> List(N,Y,N,N,N,X,X, S, I,Y,N,N,N,N,N,N,N), FCVT_S_W -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N), FCVT_S_WU-> List(N,Y,N,N,N,X,X, S, 
S,Y,N,N,N,N,N,Y,N), FCVT_S_L -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N), FCVT_S_LU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N), FMV_X_W -> List(N,N,Y,N,N,N,X, I, S,N,Y,N,N,N,N,N,N), FCLASS_S -> List(N,N,Y,N,N,N,X, S, S,N,Y,N,N,N,N,N,N), FCVT_W_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N), FCVT_WU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N), FCVT_L_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N), FCVT_LU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N), FEQ_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N), FLT_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N), FLE_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N), FSGNJ_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N), FSGNJN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N), FSGNJX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N), FMIN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N), FMAX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N), FADD_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N), FSUB_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N), FMUL_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,Y,N,N,Y,N), FMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N), FMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N), FNMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N), FNMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N), FDIV_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,N,Y,N,Y,N), FSQRT_S -> List(N,Y,Y,N,N,N,X, S, S,N,N,N,N,N,Y,Y,N)) val d: Array[(BitPat, List[BitPat])] = Array(FLD -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N), FSD -> List(Y,N,N,Y,N,Y,X, I, D,N,Y,N,N,N,N,N,N), FMV_D_X -> List(N,Y,N,N,N,X,X, D, I,Y,N,N,N,N,N,N,N), FCVT_D_W -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N), FCVT_D_WU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N), FCVT_D_L -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N), FCVT_D_LU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N), FMV_X_D -> List(N,N,Y,N,N,N,X, I, D,N,Y,N,N,N,N,N,N), FCLASS_D -> List(N,N,Y,N,N,N,X, D, D,N,Y,N,N,N,N,N,N), FCVT_W_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N), FCVT_WU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N), FCVT_L_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N), FCVT_LU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N), FCVT_S_D -> List(N,Y,Y,N,N,N,X, D, S,N,N,Y,N,N,N,Y,N), FCVT_D_S -> List(N,Y,Y,N,N,N,X, S, D,N,N,Y,N,N,N,Y,N), FEQ_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N), FLT_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N), FLE_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N), FSGNJ_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N), FSGNJN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N), FSGNJX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N), FMIN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N), FMAX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N), FADD_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N), FSUB_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N), FMUL_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,Y,N,N,Y,N), FMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N), FMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N), FNMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N), FNMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N), FDIV_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,N,Y,N,Y,N), FSQRT_D -> List(N,Y,Y,N,N,N,X, D, D,N,N,N,N,N,Y,Y,N)) val fcvt_hd: Array[(BitPat, List[BitPat])] = Array(FCVT_H_D -> List(N,Y,Y,N,N,N,X, D, H,N,N,Y,N,N,N,Y,N), FCVT_D_H -> List(N,Y,Y,N,N,N,X, H, D,N,N,Y,N,N,N,Y,N)) val vfmv_f_s: Array[(BitPat, List[BitPat])] = Array(VFMV_F_S -> List(N,Y,N,N,N,N,X,X2,X2,N,N,N,N,N,N,N,Y)) val insns = ((minFLen, fLen) match { case (32, 32) => f case (16, 32) => h ++ f case (32, 64) => f ++ d case (16, 64) => h ++ f ++ d ++ fcvt_hd case 
other => throw new Exception(s"minFLen = ${minFLen} & fLen = ${fLen} is an unsupported configuration") }) ++ (if (usingVector) vfmv_f_s else Array[(BitPat, List[BitPat])]()) val decoder = DecodeLogic(io.inst, default, insns) val s = io.sigs val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12, s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint, s.fastpipe, s.fma, s.div, s.sqrt, s.wflags, s.vec) sigs zip decoder map {case(s,d) => s := d} } class FPUCoreIO(implicit p: Parameters) extends CoreBundle()(p) { val hartid = Input(UInt(hartIdLen.W)) val time = Input(UInt(xLen.W)) val inst = Input(Bits(32.W)) val fromint_data = Input(Bits(xLen.W)) val fcsr_rm = Input(Bits(FPConstants.RM_SZ.W)) val fcsr_flags = Valid(Bits(FPConstants.FLAGS_SZ.W)) val v_sew = Input(UInt(3.W)) val store_data = Output(Bits(fLen.W)) val toint_data = Output(Bits(xLen.W)) val ll_resp_val = Input(Bool()) val ll_resp_type = Input(Bits(3.W)) val ll_resp_tag = Input(UInt(5.W)) val ll_resp_data = Input(Bits(fLen.W)) val valid = Input(Bool()) val fcsr_rdy = Output(Bool()) val nack_mem = Output(Bool()) val illegal_rm = Output(Bool()) val killx = Input(Bool()) val killm = Input(Bool()) val dec = Output(new FPUCtrlSigs()) val sboard_set = Output(Bool()) val sboard_clr = Output(Bool()) val sboard_clra = Output(UInt(5.W)) val keep_clock_enabled = Input(Bool()) } class FPUIO(implicit p: Parameters) extends FPUCoreIO ()(p) { val cp_req = Flipped(Decoupled(new FPInput())) //cp doesn't pay attn to kill sigs val cp_resp = Decoupled(new FPResult()) } class FPResult(implicit p: Parameters) extends CoreBundle()(p) { val data = Bits((fLen+1).W) val exc = Bits(FPConstants.FLAGS_SZ.W) } class IntToFPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs { val rm = Bits(FPConstants.RM_SZ.W) val typ = Bits(2.W) val in1 = Bits(xLen.W) } class FPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs { val rm = Bits(FPConstants.RM_SZ.W) val fmaCmd = Bits(2.W) val typ = Bits(2.W) val fmt = Bits(2.W) val in1 = Bits((fLen+1).W) val in2 = Bits((fLen+1).W) val in3 = Bits((fLen+1).W) } case class FType(exp: Int, sig: Int) { def ieeeWidth = exp + sig def recodedWidth = ieeeWidth + 1 def ieeeQNaN = ((BigInt(1) << (ieeeWidth - 1)) - (BigInt(1) << (sig - 2))).U(ieeeWidth.W) def qNaN = ((BigInt(7) << (exp + sig - 3)) + (BigInt(1) << (sig - 2))).U(recodedWidth.W) def isNaN(x: UInt) = x(sig + exp - 1, sig + exp - 3).andR def isSNaN(x: UInt) = isNaN(x) && !x(sig - 2) def classify(x: UInt) = { val sign = x(sig + exp) val code = x(exp + sig - 1, exp + sig - 3) val codeHi = code(2, 1) val isSpecial = codeHi === 3.U val isHighSubnormalIn = x(exp + sig - 3, sig - 1) < 2.U val isSubnormal = code === 1.U || codeHi === 1.U && isHighSubnormalIn val isNormal = codeHi === 1.U && !isHighSubnormalIn || codeHi === 2.U val isZero = code === 0.U val isInf = isSpecial && !code(0) val isNaN = code.andR val isSNaN = isNaN && !x(sig-2) val isQNaN = isNaN && x(sig-2) Cat(isQNaN, isSNaN, isInf && !sign, isNormal && !sign, isSubnormal && !sign, isZero && !sign, isZero && sign, isSubnormal && sign, isNormal && sign, isInf && sign) } // convert between formats, ignoring rounding, range, NaN def unsafeConvert(x: UInt, to: FType) = if (this == to) x else { val sign = x(sig + exp) val fractIn = x(sig - 2, 0) val expIn = x(sig + exp - 1, sig - 1) val fractOut = fractIn << to.sig >> sig val expOut = { val expCode = expIn(exp, exp - 2) val commonCase = (expIn + (1 << to.exp).U) - (1 << exp).U Mux(expCode === 0.U || expCode >= 6.U, 
Cat(expCode, commonCase(to.exp - 3, 0)), commonCase(to.exp, 0)) } Cat(sign, expOut, fractOut) } private def ieeeBundle = { val expWidth = exp class IEEEBundle extends Bundle { val sign = Bool() val exp = UInt(expWidth.W) val sig = UInt((ieeeWidth-expWidth-1).W) } new IEEEBundle } def unpackIEEE(x: UInt) = x.asTypeOf(ieeeBundle) def recode(x: UInt) = hardfloat.recFNFromFN(exp, sig, x) def ieee(x: UInt) = hardfloat.fNFromRecFN(exp, sig, x) } object FType { val H = new FType(5, 11) val S = new FType(8, 24) val D = new FType(11, 53) val all = List(H, S, D) } trait HasFPUParameters { require(fLen == 0 || FType.all.exists(_.ieeeWidth == fLen)) val minFLen: Int val fLen: Int def xLen: Int val minXLen = 32 val nIntTypes = log2Ceil(xLen/minXLen) + 1 def floatTypes = FType.all.filter(t => minFLen <= t.ieeeWidth && t.ieeeWidth <= fLen) def minType = floatTypes.head def maxType = floatTypes.last def prevType(t: FType) = floatTypes(typeTag(t) - 1) def maxExpWidth = maxType.exp def maxSigWidth = maxType.sig def typeTag(t: FType) = floatTypes.indexOf(t) def typeTagWbOffset = (FType.all.indexOf(minType) + 1).U def typeTagGroup(t: FType) = (if (floatTypes.contains(t)) typeTag(t) else typeTag(maxType)).U // typeTag def H = typeTagGroup(FType.H) def S = typeTagGroup(FType.S) def D = typeTagGroup(FType.D) def I = typeTag(maxType).U private def isBox(x: UInt, t: FType): Bool = x(t.sig + t.exp, t.sig + t.exp - 4).andR private def box(x: UInt, xt: FType, y: UInt, yt: FType): UInt = { require(xt.ieeeWidth == 2 * yt.ieeeWidth) val swizzledNaN = Cat( x(xt.sig + xt.exp, xt.sig + xt.exp - 3), x(xt.sig - 2, yt.recodedWidth - 1).andR, x(xt.sig + xt.exp - 5, xt.sig), y(yt.recodedWidth - 2), x(xt.sig - 2, yt.recodedWidth - 1), y(yt.recodedWidth - 1), y(yt.recodedWidth - 3, 0)) Mux(xt.isNaN(x), swizzledNaN, x) } // implement NaN unboxing for FU inputs def unbox(x: UInt, tag: UInt, exactType: Option[FType]): UInt = { val outType = exactType.getOrElse(maxType) def helper(x: UInt, t: FType): Seq[(Bool, UInt)] = { val prev = if (t == minType) { Seq() } else { val prevT = prevType(t) val unswizzled = Cat( x(prevT.sig + prevT.exp - 1), x(t.sig - 1), x(prevT.sig + prevT.exp - 2, 0)) val prev = helper(unswizzled, prevT) val isbox = isBox(x, t) prev.map(p => (isbox && p._1, p._2)) } prev :+ (true.B, t.unsafeConvert(x, outType)) } val (oks, floats) = helper(x, maxType).unzip if (exactType.isEmpty || floatTypes.size == 1) { Mux(oks(tag), floats(tag), maxType.qNaN) } else { val t = exactType.get floats(typeTag(t)) | Mux(oks(typeTag(t)), 0.U, t.qNaN) } } // make sure that the redundant bits in the NaN-boxed encoding are consistent def consistent(x: UInt): Bool = { def helper(x: UInt, t: FType): Bool = if (typeTag(t) == 0) true.B else { val prevT = prevType(t) val unswizzled = Cat( x(prevT.sig + prevT.exp - 1), x(t.sig - 1), x(prevT.sig + prevT.exp - 2, 0)) val prevOK = !isBox(x, t) || helper(unswizzled, prevT) val curOK = !t.isNaN(x) || x(t.sig + t.exp - 4) === x(t.sig - 2, prevT.recodedWidth - 1).andR prevOK && curOK } helper(x, maxType) } // generate a NaN box from an FU result def box(x: UInt, t: FType): UInt = { if (t == maxType) { x } else { val nt = floatTypes(typeTag(t) + 1) val bigger = box(((BigInt(1) << nt.recodedWidth)-1).U, nt, x, t) bigger | ((BigInt(1) << maxType.recodedWidth) - (BigInt(1) << nt.recodedWidth)).U } } // generate a NaN box from an FU result def box(x: UInt, tag: UInt): UInt = { val opts = floatTypes.map(t => box(x, t)) opts(tag) } // zap bits that hardfloat thinks are don't-cares, but we do care about def 
sanitizeNaN(x: UInt, t: FType): UInt = { if (typeTag(t) == 0) { x } else { val maskedNaN = x & ~((BigInt(1) << (t.sig-1)) | (BigInt(1) << (t.sig+t.exp-4))).U(t.recodedWidth.W) Mux(t.isNaN(x), maskedNaN, x) } } // implement NaN boxing and recoding for FL*/fmv.*.x def recode(x: UInt, tag: UInt): UInt = { def helper(x: UInt, t: FType): UInt = { if (typeTag(t) == 0) { t.recode(x) } else { val prevT = prevType(t) box(t.recode(x), t, helper(x, prevT), prevT) } } // fill MSBs of subword loads to emulate a wider load of a NaN-boxed value val boxes = floatTypes.map(t => ((BigInt(1) << maxType.ieeeWidth) - (BigInt(1) << t.ieeeWidth)).U) helper(boxes(tag) | x, maxType) } // implement NaN unboxing and un-recoding for FS*/fmv.x.* def ieee(x: UInt, t: FType = maxType): UInt = { if (typeTag(t) == 0) { t.ieee(x) } else { val unrecoded = t.ieee(x) val prevT = prevType(t) val prevRecoded = Cat( x(prevT.recodedWidth-2), x(t.sig-1), x(prevT.recodedWidth-3, 0)) val prevUnrecoded = ieee(prevRecoded, prevT) Cat(unrecoded >> prevT.ieeeWidth, Mux(t.isNaN(x), prevUnrecoded, unrecoded(prevT.ieeeWidth-1, 0))) } } } abstract class FPUModule(implicit val p: Parameters) extends Module with HasCoreParameters with HasFPUParameters class FPToInt(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed { class Output extends Bundle { val in = new FPInput val lt = Bool() val store = Bits(fLen.W) val toint = Bits(xLen.W) val exc = Bits(FPConstants.FLAGS_SZ.W) } val io = IO(new Bundle { val in = Flipped(Valid(new FPInput)) val out = Valid(new Output) }) val in = RegEnable(io.in.bits, io.in.valid) val valid = RegNext(io.in.valid) val dcmp = Module(new hardfloat.CompareRecFN(maxExpWidth, maxSigWidth)) dcmp.io.a := in.in1 dcmp.io.b := in.in2 dcmp.io.signaling := !in.rm(1) val tag = in.typeTagOut val toint_ieee = (floatTypes.map(t => if (t == FType.H) Fill(maxType.ieeeWidth / minXLen, ieee(in.in1)(15, 0).sextTo(minXLen)) else Fill(maxType.ieeeWidth / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag) val toint = WireDefault(toint_ieee) val intType = WireDefault(in.fmt(0)) io.out.bits.store := (floatTypes.map(t => Fill(fLen / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag) io.out.bits.toint := ((0 until nIntTypes).map(i => toint((minXLen << i) - 1, 0).sextTo(xLen)): Seq[UInt])(intType) io.out.bits.exc := 0.U when (in.rm(0)) { val classify_out = (floatTypes.map(t => t.classify(maxType.unsafeConvert(in.in1, t))): Seq[UInt])(tag) toint := classify_out | (toint_ieee >> minXLen << minXLen) intType := false.B } when (in.wflags) { // feq/flt/fle, fcvt toint := (~in.rm & Cat(dcmp.io.lt, dcmp.io.eq)).orR | (toint_ieee >> minXLen << minXLen) io.out.bits.exc := dcmp.io.exceptionFlags intType := false.B when (!in.ren2) { // fcvt val cvtType = in.typ.extract(log2Ceil(nIntTypes), 1) intType := cvtType val conv = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, xLen)) conv.io.in := in.in1 conv.io.roundingMode := in.rm conv.io.signedOut := ~in.typ(0) toint := conv.io.out io.out.bits.exc := Cat(conv.io.intExceptionFlags(2, 1).orR, 0.U(3.W), conv.io.intExceptionFlags(0)) for (i <- 0 until nIntTypes-1) { val w = minXLen << i when (cvtType === i.U) { val narrow = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, w)) narrow.io.in := in.in1 narrow.io.roundingMode := in.rm narrow.io.signedOut := ~in.typ(0) val excSign = in.in1(maxExpWidth + maxSigWidth) && !maxType.isNaN(in.in1) val excOut = Cat(conv.io.signedOut === excSign, Fill(w-1, !excSign)) val invalid = conv.io.intExceptionFlags(2) || 
narrow.io.intExceptionFlags(1) when (invalid) { toint := Cat(conv.io.out >> w, excOut) } io.out.bits.exc := Cat(invalid, 0.U(3.W), !invalid && conv.io.intExceptionFlags(0)) } } } } io.out.valid := valid io.out.bits.lt := dcmp.io.lt || (dcmp.io.a.asSInt < 0.S && dcmp.io.b.asSInt >= 0.S) io.out.bits.in := in } class IntToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed { val io = IO(new Bundle { val in = Flipped(Valid(new IntToFPInput)) val out = Valid(new FPResult) }) val in = Pipe(io.in) val tag = in.bits.typeTagIn val mux = Wire(new FPResult) mux.exc := 0.U mux.data := recode(in.bits.in1, tag) val intValue = { val res = WireDefault(in.bits.in1.asSInt) for (i <- 0 until nIntTypes-1) { val smallInt = in.bits.in1((minXLen << i) - 1, 0) when (in.bits.typ.extract(log2Ceil(nIntTypes), 1) === i.U) { res := Mux(in.bits.typ(0), smallInt.zext, smallInt.asSInt) } } res.asUInt } when (in.bits.wflags) { // fcvt // could be improved for RVD/RVQ with a single variable-position rounding // unit, rather than N fixed-position ones val i2fResults = for (t <- floatTypes) yield { val i2f = Module(new hardfloat.INToRecFN(xLen, t.exp, t.sig)) i2f.io.signedIn := ~in.bits.typ(0) i2f.io.in := intValue i2f.io.roundingMode := in.bits.rm i2f.io.detectTininess := hardfloat.consts.tininess_afterRounding (sanitizeNaN(i2f.io.out, t), i2f.io.exceptionFlags) } val (data, exc) = i2fResults.unzip val dataPadded = data.init.map(d => Cat(data.last >> d.getWidth, d)) :+ data.last mux.data := dataPadded(tag) mux.exc := exc(tag) } io.out <> Pipe(in.valid, mux, latency-1) } class FPToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed { val io = IO(new Bundle { val in = Flipped(Valid(new FPInput)) val out = Valid(new FPResult) val lt = Input(Bool()) // from FPToInt }) val in = Pipe(io.in) val signNum = Mux(in.bits.rm(1), in.bits.in1 ^ in.bits.in2, Mux(in.bits.rm(0), ~in.bits.in2, in.bits.in2)) val fsgnj = Cat(signNum(fLen), in.bits.in1(fLen-1, 0)) val fsgnjMux = Wire(new FPResult) fsgnjMux.exc := 0.U fsgnjMux.data := fsgnj when (in.bits.wflags) { // fmin/fmax val isnan1 = maxType.isNaN(in.bits.in1) val isnan2 = maxType.isNaN(in.bits.in2) val isInvalid = maxType.isSNaN(in.bits.in1) || maxType.isSNaN(in.bits.in2) val isNaNOut = isnan1 && isnan2 val isLHS = isnan2 || in.bits.rm(0) =/= io.lt && !isnan1 fsgnjMux.exc := isInvalid << 4 fsgnjMux.data := Mux(isNaNOut, maxType.qNaN, Mux(isLHS, in.bits.in1, in.bits.in2)) } val inTag = in.bits.typeTagIn val outTag = in.bits.typeTagOut val mux = WireDefault(fsgnjMux) for (t <- floatTypes.init) { when (outTag === typeTag(t).U) { mux.data := Cat(fsgnjMux.data >> t.recodedWidth, maxType.unsafeConvert(fsgnjMux.data, t)) } } when (in.bits.wflags && !in.bits.ren2) { // fcvt if (floatTypes.size > 1) { // widening conversions simply canonicalize NaN operands val widened = Mux(maxType.isNaN(in.bits.in1), maxType.qNaN, in.bits.in1) fsgnjMux.data := widened fsgnjMux.exc := maxType.isSNaN(in.bits.in1) << 4 // narrowing conversions require rounding (for RVQ, this could be // optimized to use a single variable-position rounding unit, rather // than two fixed-position ones) for (outType <- floatTypes.init) when (outTag === typeTag(outType).U && ((typeTag(outType) == 0).B || outTag < inTag)) { val narrower = Module(new hardfloat.RecFNToRecFN(maxType.exp, maxType.sig, outType.exp, outType.sig)) narrower.io.in := in.bits.in1 narrower.io.roundingMode := in.bits.rm narrower.io.detectTininess := hardfloat.consts.tininess_afterRounding val 
narrowed = sanitizeNaN(narrower.io.out, outType) mux.data := Cat(fsgnjMux.data >> narrowed.getWidth, narrowed) mux.exc := narrower.io.exceptionFlags } } } io.out <> Pipe(in.valid, mux, latency-1) } class MulAddRecFNPipe(latency: Int, expWidth: Int, sigWidth: Int) extends Module { override def desiredName = s"MulAddRecFNPipe_l${latency}_e${expWidth}_s${sigWidth}" require(latency<=2) val io = IO(new Bundle { val validin = Input(Bool()) val op = Input(Bits(2.W)) val a = Input(Bits((expWidth + sigWidth + 1).W)) val b = Input(Bits((expWidth + sigWidth + 1).W)) val c = Input(Bits((expWidth + sigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) val validout = Output(Bool()) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val mulAddRecFNToRaw_preMul = Module(new hardfloat.MulAddRecFNToRaw_preMul(expWidth, sigWidth)) val mulAddRecFNToRaw_postMul = Module(new hardfloat.MulAddRecFNToRaw_postMul(expWidth, sigWidth)) mulAddRecFNToRaw_preMul.io.op := io.op mulAddRecFNToRaw_preMul.io.a := io.a mulAddRecFNToRaw_preMul.io.b := io.b mulAddRecFNToRaw_preMul.io.c := io.c val mulAddResult = (mulAddRecFNToRaw_preMul.io.mulAddA * mulAddRecFNToRaw_preMul.io.mulAddB) +& mulAddRecFNToRaw_preMul.io.mulAddC val valid_stage0 = Wire(Bool()) val roundingMode_stage0 = Wire(UInt(3.W)) val detectTininess_stage0 = Wire(UInt(1.W)) val postmul_regs = if(latency>0) 1 else 0 mulAddRecFNToRaw_postMul.io.fromPreMul := Pipe(io.validin, mulAddRecFNToRaw_preMul.io.toPostMul, postmul_regs).bits mulAddRecFNToRaw_postMul.io.mulAddResult := Pipe(io.validin, mulAddResult, postmul_regs).bits mulAddRecFNToRaw_postMul.io.roundingMode := Pipe(io.validin, io.roundingMode, postmul_regs).bits roundingMode_stage0 := Pipe(io.validin, io.roundingMode, postmul_regs).bits detectTininess_stage0 := Pipe(io.validin, io.detectTininess, postmul_regs).bits valid_stage0 := Pipe(io.validin, false.B, postmul_regs).valid //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundRawFNToRecFN = Module(new hardfloat.RoundRawFNToRecFN(expWidth, sigWidth, 0)) val round_regs = if(latency==2) 1 else 0 roundRawFNToRecFN.io.invalidExc := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.invalidExc, round_regs).bits roundRawFNToRecFN.io.in := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.rawOut, round_regs).bits roundRawFNToRecFN.io.roundingMode := Pipe(valid_stage0, roundingMode_stage0, round_regs).bits roundRawFNToRecFN.io.detectTininess := Pipe(valid_stage0, detectTininess_stage0, round_regs).bits io.validout := Pipe(valid_stage0, false.B, round_regs).valid roundRawFNToRecFN.io.infiniteExc := false.B io.out := roundRawFNToRecFN.io.out io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags } class FPUFMAPipe(val latency: Int, val t: FType) (implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed { override def desiredName = s"FPUFMAPipe_l${latency}_f${t.ieeeWidth}" require(latency>0) val io = IO(new Bundle { val in = Flipped(Valid(new FPInput)) val out = Valid(new FPResult) }) val valid = RegNext(io.in.valid) val in = Reg(new FPInput) when (io.in.valid) { val one = 1.U << (t.sig + t.exp - 1) val zero = (io.in.bits.in1 ^ io.in.bits.in2) & (1.U << (t.sig + t.exp)) val cmd_fma = io.in.bits.ren3 val cmd_addsub = io.in.bits.swap23 
in := io.in.bits when (cmd_addsub) { in.in2 := one } when (!(cmd_fma || cmd_addsub)) { in.in3 := zero } } val fma = Module(new MulAddRecFNPipe((latency-1) min 2, t.exp, t.sig)) fma.io.validin := valid fma.io.op := in.fmaCmd fma.io.roundingMode := in.rm fma.io.detectTininess := hardfloat.consts.tininess_afterRounding fma.io.a := in.in1 fma.io.b := in.in2 fma.io.c := in.in3 val res = Wire(new FPResult) res.data := sanitizeNaN(fma.io.out, t) res.exc := fma.io.exceptionFlags io.out := Pipe(fma.io.validout, res, (latency-3) max 0) } class FPU(cfg: FPUParams)(implicit p: Parameters) extends FPUModule()(p) { val io = IO(new FPUIO) val (useClockGating, useDebugROB) = coreParams match { case r: RocketCoreParams => val sz = if (r.debugROB.isDefined) r.debugROB.get.size else 1 (r.clockGate, sz < 1) case _ => (false, false) } val clock_en_reg = Reg(Bool()) val clock_en = clock_en_reg || io.cp_req.valid val gated_clock = if (!useClockGating) clock else ClockGate(clock, clock_en, "fpu_clock_gate") val fp_decoder = Module(new FPUDecoder) fp_decoder.io.inst := io.inst val id_ctrl = WireInit(fp_decoder.io.sigs) coreParams match { case r: RocketCoreParams => r.vector.map(v => { val v_decode = v.decoder(p) // Only need to get ren1 v_decode.io.inst := io.inst v_decode.io.vconfig := DontCare // core deals with this when (v_decode.io.legal && v_decode.io.read_frs1) { id_ctrl.ren1 := true.B id_ctrl.swap12 := false.B id_ctrl.toint := true.B id_ctrl.typeTagIn := I id_ctrl.typeTagOut := Mux(io.v_sew === 3.U, D, S) } when (v_decode.io.write_frd) { id_ctrl.wen := true.B } })} val ex_reg_valid = RegNext(io.valid, false.B) val ex_reg_inst = RegEnable(io.inst, io.valid) val ex_reg_ctrl = RegEnable(id_ctrl, io.valid) val ex_ra = List.fill(3)(Reg(UInt())) // load/vector response val load_wb = RegNext(io.ll_resp_val) val load_wb_typeTag = RegEnable(io.ll_resp_type(1,0) - typeTagWbOffset, io.ll_resp_val) val load_wb_data = RegEnable(io.ll_resp_data, io.ll_resp_val) val load_wb_tag = RegEnable(io.ll_resp_tag, io.ll_resp_val) class FPUImpl { // entering gated-clock domain val req_valid = ex_reg_valid || io.cp_req.valid val ex_cp_valid = io.cp_req.fire val mem_cp_valid = RegNext(ex_cp_valid, false.B) val wb_cp_valid = RegNext(mem_cp_valid, false.B) val mem_reg_valid = RegInit(false.B) val killm = (io.killm || io.nack_mem) && !mem_cp_valid // Kill X-stage instruction if M-stage is killed. This prevents it from // speculatively being sent to the div-sqrt unit, which can cause priority // inversion for two back-to-back divides, the first of which is killed. 
val killx = io.killx || mem_reg_valid && killm mem_reg_valid := ex_reg_valid && !killx || ex_cp_valid val mem_reg_inst = RegEnable(ex_reg_inst, ex_reg_valid) val wb_reg_valid = RegNext(mem_reg_valid && (!killm || mem_cp_valid), false.B) val cp_ctrl = Wire(new FPUCtrlSigs) cp_ctrl :<>= io.cp_req.bits.viewAsSupertype(new FPUCtrlSigs) io.cp_resp.valid := false.B io.cp_resp.bits.data := 0.U io.cp_resp.bits.exc := DontCare val ex_ctrl = Mux(ex_cp_valid, cp_ctrl, ex_reg_ctrl) val mem_ctrl = RegEnable(ex_ctrl, req_valid) val wb_ctrl = RegEnable(mem_ctrl, mem_reg_valid) // CoreMonitorBundle to monitor fp register file writes val frfWriteBundle = Seq.fill(2)(WireInit(new CoreMonitorBundle(xLen, fLen), DontCare)) frfWriteBundle.foreach { i => i.clock := clock i.reset := reset i.hartid := io.hartid i.timer := io.time(31,0) i.valid := false.B i.wrenx := false.B i.wrenf := false.B i.excpt := false.B } // regfile val regfile = Mem(32, Bits((fLen+1).W)) when (load_wb) { val wdata = recode(load_wb_data, load_wb_typeTag) regfile(load_wb_tag) := wdata assert(consistent(wdata)) if (enableCommitLog) printf("f%d p%d 0x%x\n", load_wb_tag, load_wb_tag + 32.U, ieee(wdata)) if (useDebugROB) DebugROB.pushWb(clock, reset, io.hartid, load_wb, load_wb_tag + 32.U, ieee(wdata)) frfWriteBundle(0).wrdst := load_wb_tag frfWriteBundle(0).wrenf := true.B frfWriteBundle(0).wrdata := ieee(wdata) } val ex_rs = ex_ra.map(a => regfile(a)) when (io.valid) { when (id_ctrl.ren1) { when (!id_ctrl.swap12) { ex_ra(0) := io.inst(19,15) } when (id_ctrl.swap12) { ex_ra(1) := io.inst(19,15) } } when (id_ctrl.ren2) { when (id_ctrl.swap12) { ex_ra(0) := io.inst(24,20) } when (id_ctrl.swap23) { ex_ra(2) := io.inst(24,20) } when (!id_ctrl.swap12 && !id_ctrl.swap23) { ex_ra(1) := io.inst(24,20) } } when (id_ctrl.ren3) { ex_ra(2) := io.inst(31,27) } } val ex_rm = Mux(ex_reg_inst(14,12) === 7.U, io.fcsr_rm, ex_reg_inst(14,12)) def fuInput(minT: Option[FType]): FPInput = { val req = Wire(new FPInput) val tag = ex_ctrl.typeTagIn req.viewAsSupertype(new Bundle with HasFPUCtrlSigs) :#= ex_ctrl.viewAsSupertype(new Bundle with HasFPUCtrlSigs) req.rm := ex_rm req.in1 := unbox(ex_rs(0), tag, minT) req.in2 := unbox(ex_rs(1), tag, minT) req.in3 := unbox(ex_rs(2), tag, minT) req.typ := ex_reg_inst(21,20) req.fmt := ex_reg_inst(26,25) req.fmaCmd := ex_reg_inst(3,2) | (!ex_ctrl.ren3 && ex_reg_inst(27)) when (ex_cp_valid) { req := io.cp_req.bits when (io.cp_req.bits.swap12) { req.in1 := io.cp_req.bits.in2 req.in2 := io.cp_req.bits.in1 } when (io.cp_req.bits.swap23) { req.in2 := io.cp_req.bits.in3 req.in3 := io.cp_req.bits.in2 } } req } val sfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.S)) sfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === S sfma.io.in.bits := fuInput(Some(sfma.t)) val fpiu = Module(new FPToInt) fpiu.io.in.valid := req_valid && (ex_ctrl.toint || ex_ctrl.div || ex_ctrl.sqrt || (ex_ctrl.fastpipe && ex_ctrl.wflags)) fpiu.io.in.bits := fuInput(None) io.store_data := fpiu.io.out.bits.store io.toint_data := fpiu.io.out.bits.toint when(fpiu.io.out.valid && mem_cp_valid && mem_ctrl.toint){ io.cp_resp.bits.data := fpiu.io.out.bits.toint io.cp_resp.valid := true.B } val ifpu = Module(new IntToFP(cfg.ifpuLatency)) ifpu.io.in.valid := req_valid && ex_ctrl.fromint ifpu.io.in.bits := fpiu.io.in.bits ifpu.io.in.bits.in1 := Mux(ex_cp_valid, io.cp_req.bits.in1, io.fromint_data) val fpmu = Module(new FPToFP(cfg.fpmuLatency)) fpmu.io.in.valid := req_valid && ex_ctrl.fastpipe fpmu.io.in.bits := fpiu.io.in.bits fpmu.io.lt := 
fpiu.io.out.bits.lt val divSqrt_wen = WireDefault(false.B) val divSqrt_inFlight = WireDefault(false.B) val divSqrt_waddr = Reg(UInt(5.W)) val divSqrt_cp = Reg(Bool()) val divSqrt_typeTag = Wire(UInt(log2Up(floatTypes.size).W)) val divSqrt_wdata = Wire(UInt((fLen+1).W)) val divSqrt_flags = Wire(UInt(FPConstants.FLAGS_SZ.W)) divSqrt_typeTag := DontCare divSqrt_wdata := DontCare divSqrt_flags := DontCare // writeback arbitration case class Pipe(p: Module, lat: Int, cond: (FPUCtrlSigs) => Bool, res: FPResult) val pipes = List( Pipe(fpmu, fpmu.latency, (c: FPUCtrlSigs) => c.fastpipe, fpmu.io.out.bits), Pipe(ifpu, ifpu.latency, (c: FPUCtrlSigs) => c.fromint, ifpu.io.out.bits), Pipe(sfma, sfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === S, sfma.io.out.bits)) ++ (fLen > 32).option({ val dfma = Module(new FPUFMAPipe(cfg.dfmaLatency, FType.D)) dfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === D dfma.io.in.bits := fuInput(Some(dfma.t)) Pipe(dfma, dfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === D, dfma.io.out.bits) }) ++ (minFLen == 16).option({ val hfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.H)) hfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === H hfma.io.in.bits := fuInput(Some(hfma.t)) Pipe(hfma, hfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === H, hfma.io.out.bits) }) def latencyMask(c: FPUCtrlSigs, offset: Int) = { require(pipes.forall(_.lat >= offset)) pipes.map(p => Mux(p.cond(c), (1 << p.lat-offset).U, 0.U)).reduce(_|_) } def pipeid(c: FPUCtrlSigs) = pipes.zipWithIndex.map(p => Mux(p._1.cond(c), p._2.U, 0.U)).reduce(_|_) val maxLatency = pipes.map(_.lat).max val memLatencyMask = latencyMask(mem_ctrl, 2) class WBInfo extends Bundle { val rd = UInt(5.W) val typeTag = UInt(log2Up(floatTypes.size).W) val cp = Bool() val pipeid = UInt(log2Ceil(pipes.size).W) } val wen = RegInit(0.U((maxLatency-1).W)) val wbInfo = Reg(Vec(maxLatency-1, new WBInfo)) val mem_wen = mem_reg_valid && (mem_ctrl.fma || mem_ctrl.fastpipe || mem_ctrl.fromint) val write_port_busy = RegEnable(mem_wen && (memLatencyMask & latencyMask(ex_ctrl, 1)).orR || (wen & latencyMask(ex_ctrl, 0)).orR, req_valid) ccover(mem_reg_valid && write_port_busy, "WB_STRUCTURAL", "structural hazard on writeback") for (i <- 0 until maxLatency-2) { when (wen(i+1)) { wbInfo(i) := wbInfo(i+1) } } wen := wen >> 1 when (mem_wen) { when (!killm) { wen := wen >> 1 | memLatencyMask } for (i <- 0 until maxLatency-1) { when (!write_port_busy && memLatencyMask(i)) { wbInfo(i).cp := mem_cp_valid wbInfo(i).typeTag := mem_ctrl.typeTagOut wbInfo(i).pipeid := pipeid(mem_ctrl) wbInfo(i).rd := mem_reg_inst(11,7) } } } val waddr = Mux(divSqrt_wen, divSqrt_waddr, wbInfo(0).rd) val wb_cp = Mux(divSqrt_wen, divSqrt_cp, wbInfo(0).cp) val wtypeTag = Mux(divSqrt_wen, divSqrt_typeTag, wbInfo(0).typeTag) val wdata = box(Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wbInfo(0).pipeid)), wtypeTag) val wexc = (pipes.map(_.res.exc): Seq[UInt])(wbInfo(0).pipeid) when ((!wbInfo(0).cp && wen(0)) || divSqrt_wen) { assert(consistent(wdata)) regfile(waddr) := wdata if (enableCommitLog) { printf("f%d p%d 0x%x\n", waddr, waddr + 32.U, ieee(wdata)) } frfWriteBundle(1).wrdst := waddr frfWriteBundle(1).wrenf := true.B frfWriteBundle(1).wrdata := ieee(wdata) } if (useDebugROB) { DebugROB.pushWb(clock, reset, io.hartid, (!wbInfo(0).cp && wen(0)) || divSqrt_wen, waddr + 32.U, ieee(wdata)) } when (wb_cp && (wen(0) || divSqrt_wen)) { io.cp_resp.bits.data := wdata io.cp_resp.valid := true.B } 
assert(!io.cp_req.valid || pipes.forall(_.lat == pipes.head.lat).B, s"FPU only supports coprocessor if FMA pipes have uniform latency ${pipes.map(_.lat)}") // Avoid structural hazards and nacking of external requests // toint responds in the MEM stage, so an incoming toint can induce a structural hazard against inflight FMAs io.cp_req.ready := !ex_reg_valid && !(cp_ctrl.toint && wen =/= 0.U) && !divSqrt_inFlight val wb_toint_valid = wb_reg_valid && wb_ctrl.toint val wb_toint_exc = RegEnable(fpiu.io.out.bits.exc, mem_ctrl.toint) io.fcsr_flags.valid := wb_toint_valid || divSqrt_wen || wen(0) io.fcsr_flags.bits := Mux(wb_toint_valid, wb_toint_exc, 0.U) | Mux(divSqrt_wen, divSqrt_flags, 0.U) | Mux(wen(0), wexc, 0.U) val divSqrt_write_port_busy = (mem_ctrl.div || mem_ctrl.sqrt) && wen.orR io.fcsr_rdy := !(ex_reg_valid && ex_ctrl.wflags || mem_reg_valid && mem_ctrl.wflags || wb_reg_valid && wb_ctrl.toint || wen.orR || divSqrt_inFlight) io.nack_mem := (write_port_busy || divSqrt_write_port_busy || divSqrt_inFlight) && !mem_cp_valid io.dec <> id_ctrl def useScoreboard(f: ((Pipe, Int)) => Bool) = pipes.zipWithIndex.filter(_._1.lat > 3).map(x => f(x)).fold(false.B)(_||_) io.sboard_set := wb_reg_valid && !wb_cp_valid && RegNext(useScoreboard(_._1.cond(mem_ctrl)) || mem_ctrl.div || mem_ctrl.sqrt || mem_ctrl.vec) io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wbInfo(0).pipeid === x._2.U))) io.sboard_clra := waddr ccover(io.sboard_clr && load_wb, "DUAL_WRITEBACK", "load and FMA writeback on same cycle") // we don't currently support round-max-magnitude (rm=4) io.illegal_rm := io.inst(14,12).isOneOf(5.U, 6.U) || io.inst(14,12) === 7.U && io.fcsr_rm >= 5.U if (cfg.divSqrt) { val divSqrt_inValid = mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt) && !divSqrt_inFlight val divSqrt_killed = RegNext(divSqrt_inValid && killm, true.B) when (divSqrt_inValid) { divSqrt_waddr := mem_reg_inst(11,7) divSqrt_cp := mem_cp_valid } ccover(divSqrt_inFlight && divSqrt_killed, "DIV_KILLED", "divide killed after issued to divider") ccover(divSqrt_inFlight && mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt), "DIV_BUSY", "divider structural hazard") ccover(mem_reg_valid && divSqrt_write_port_busy, "DIV_WB_STRUCTURAL", "structural hazard on division writeback") for (t <- floatTypes) { val tag = mem_ctrl.typeTagOut val divSqrt = withReset(divSqrt_killed) { Module(new hardfloat.DivSqrtRecFN_small(t.exp, t.sig, 0)) } divSqrt.io.inValid := divSqrt_inValid && tag === typeTag(t).U divSqrt.io.sqrtOp := mem_ctrl.sqrt divSqrt.io.a := maxType.unsafeConvert(fpiu.io.out.bits.in.in1, t) divSqrt.io.b := maxType.unsafeConvert(fpiu.io.out.bits.in.in2, t) divSqrt.io.roundingMode := fpiu.io.out.bits.in.rm divSqrt.io.detectTininess := hardfloat.consts.tininess_afterRounding when (!divSqrt.io.inReady) { divSqrt_inFlight := true.B } // only 1 in flight when (divSqrt.io.outValid_div || divSqrt.io.outValid_sqrt) { divSqrt_wen := !divSqrt_killed divSqrt_wdata := sanitizeNaN(divSqrt.io.out, t) divSqrt_flags := divSqrt.io.exceptionFlags divSqrt_typeTag := typeTag(t).U } } when (divSqrt_killed) { divSqrt_inFlight := false.B } } else { when (id_ctrl.div || id_ctrl.sqrt) { io.illegal_rm := true.B } } // gate the clock clock_en_reg := !useClockGating.B || io.keep_clock_enabled || // chicken bit io.valid || // ID stage req_valid || // EX stage mem_reg_valid || mem_cp_valid || // MEM stage wb_reg_valid || wb_cp_valid || // WB stage wen.orR || divSqrt_inFlight || // post-WB stage io.ll_resp_val // load writeback } // leaving 
gated-clock domain val fpuImpl = withClock (gated_clock) { new FPUImpl } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = property.cover(cond, s"FPU_$label", "Core;;" + desc) }
module FPUFMAPipe_l4_f32_1( // @[FPU.scala:697:7] input clock, // @[FPU.scala:697:7] input reset, // @[FPU.scala:697:7] input io_in_valid, // @[FPU.scala:702:14] input io_in_bits_ldst, // @[FPU.scala:702:14] input io_in_bits_wen, // @[FPU.scala:702:14] input io_in_bits_ren1, // @[FPU.scala:702:14] input io_in_bits_ren2, // @[FPU.scala:702:14] input io_in_bits_ren3, // @[FPU.scala:702:14] input io_in_bits_swap12, // @[FPU.scala:702:14] input io_in_bits_swap23, // @[FPU.scala:702:14] input [1:0] io_in_bits_typeTagIn, // @[FPU.scala:702:14] input [1:0] io_in_bits_typeTagOut, // @[FPU.scala:702:14] input io_in_bits_fromint, // @[FPU.scala:702:14] input io_in_bits_toint, // @[FPU.scala:702:14] input io_in_bits_fastpipe, // @[FPU.scala:702:14] input io_in_bits_fma, // @[FPU.scala:702:14] input io_in_bits_div, // @[FPU.scala:702:14] input io_in_bits_sqrt, // @[FPU.scala:702:14] input io_in_bits_wflags, // @[FPU.scala:702:14] input [2:0] io_in_bits_rm, // @[FPU.scala:702:14] input [1:0] io_in_bits_fmaCmd, // @[FPU.scala:702:14] input [1:0] io_in_bits_typ, // @[FPU.scala:702:14] input [1:0] io_in_bits_fmt, // @[FPU.scala:702:14] input [64:0] io_in_bits_in1, // @[FPU.scala:702:14] input [64:0] io_in_bits_in2, // @[FPU.scala:702:14] input [64:0] io_in_bits_in3, // @[FPU.scala:702:14] output io_out_valid, // @[FPU.scala:702:14] output [64:0] io_out_bits_data, // @[FPU.scala:702:14] output [4:0] io_out_bits_exc // @[FPU.scala:702:14] ); wire [32:0] _fma_io_out; // @[FPU.scala:719:19] wire _fma_io_validout; // @[FPU.scala:719:19] wire io_in_valid_0 = io_in_valid; // @[FPU.scala:697:7] wire io_in_bits_ldst_0 = io_in_bits_ldst; // @[FPU.scala:697:7] wire io_in_bits_wen_0 = io_in_bits_wen; // @[FPU.scala:697:7] wire io_in_bits_ren1_0 = io_in_bits_ren1; // @[FPU.scala:697:7] wire io_in_bits_ren2_0 = io_in_bits_ren2; // @[FPU.scala:697:7] wire io_in_bits_ren3_0 = io_in_bits_ren3; // @[FPU.scala:697:7] wire io_in_bits_swap12_0 = io_in_bits_swap12; // @[FPU.scala:697:7] wire io_in_bits_swap23_0 = io_in_bits_swap23; // @[FPU.scala:697:7] wire [1:0] io_in_bits_typeTagIn_0 = io_in_bits_typeTagIn; // @[FPU.scala:697:7] wire [1:0] io_in_bits_typeTagOut_0 = io_in_bits_typeTagOut; // @[FPU.scala:697:7] wire io_in_bits_fromint_0 = io_in_bits_fromint; // @[FPU.scala:697:7] wire io_in_bits_toint_0 = io_in_bits_toint; // @[FPU.scala:697:7] wire io_in_bits_fastpipe_0 = io_in_bits_fastpipe; // @[FPU.scala:697:7] wire io_in_bits_fma_0 = io_in_bits_fma; // @[FPU.scala:697:7] wire io_in_bits_div_0 = io_in_bits_div; // @[FPU.scala:697:7] wire io_in_bits_sqrt_0 = io_in_bits_sqrt; // @[FPU.scala:697:7] wire io_in_bits_wflags_0 = io_in_bits_wflags; // @[FPU.scala:697:7] wire [2:0] io_in_bits_rm_0 = io_in_bits_rm; // @[FPU.scala:697:7] wire [1:0] io_in_bits_fmaCmd_0 = io_in_bits_fmaCmd; // @[FPU.scala:697:7] wire [1:0] io_in_bits_typ_0 = io_in_bits_typ; // @[FPU.scala:697:7] wire [1:0] io_in_bits_fmt_0 = io_in_bits_fmt; // @[FPU.scala:697:7] wire [64:0] io_in_bits_in1_0 = io_in_bits_in1; // @[FPU.scala:697:7] wire [64:0] io_in_bits_in2_0 = io_in_bits_in2; // @[FPU.scala:697:7] wire [64:0] io_in_bits_in3_0 = io_in_bits_in3; // @[FPU.scala:697:7] wire [31:0] one = 32'h80000000; // @[FPU.scala:710:19] wire [32:0] _zero_T_1 = 33'h100000000; // @[FPU.scala:711:57] wire io_in_bits_vec = 1'h0; // @[FPU.scala:697:7] wire io_out_pipe_out_valid; // @[Valid.scala:135:21] wire [64:0] io_out_pipe_out_bits_data; // @[Valid.scala:135:21] wire [4:0] io_out_pipe_out_bits_exc; // @[Valid.scala:135:21] wire [64:0] io_out_bits_data_0; // 
@[FPU.scala:697:7] wire [4:0] io_out_bits_exc_0; // @[FPU.scala:697:7] wire io_out_valid_0; // @[FPU.scala:697:7] reg valid; // @[FPU.scala:707:22] reg in_ldst; // @[FPU.scala:708:15] reg in_wen; // @[FPU.scala:708:15] reg in_ren1; // @[FPU.scala:708:15] reg in_ren2; // @[FPU.scala:708:15] reg in_ren3; // @[FPU.scala:708:15] reg in_swap12; // @[FPU.scala:708:15] reg in_swap23; // @[FPU.scala:708:15] reg [1:0] in_typeTagIn; // @[FPU.scala:708:15] reg [1:0] in_typeTagOut; // @[FPU.scala:708:15] reg in_fromint; // @[FPU.scala:708:15] reg in_toint; // @[FPU.scala:708:15] reg in_fastpipe; // @[FPU.scala:708:15] reg in_fma; // @[FPU.scala:708:15] reg in_div; // @[FPU.scala:708:15] reg in_sqrt; // @[FPU.scala:708:15] reg in_wflags; // @[FPU.scala:708:15] reg [2:0] in_rm; // @[FPU.scala:708:15] reg [1:0] in_fmaCmd; // @[FPU.scala:708:15] reg [1:0] in_typ; // @[FPU.scala:708:15] reg [1:0] in_fmt; // @[FPU.scala:708:15] reg [64:0] in_in1; // @[FPU.scala:708:15] reg [64:0] in_in2; // @[FPU.scala:708:15] reg [64:0] in_in3; // @[FPU.scala:708:15] wire [64:0] _zero_T = io_in_bits_in1_0 ^ io_in_bits_in2_0; // @[FPU.scala:697:7, :711:32] wire [64:0] zero = {32'h0, _zero_T[32], 32'h0}; // @[FPU.scala:711:{32,50}] wire [64:0] res_data; // @[FPU.scala:728:17] wire [4:0] res_exc; // @[FPU.scala:728:17] assign res_data = {32'h0, _fma_io_out}; // @[FPU.scala:719:19, :728:17, :729:12] reg io_out_pipe_v; // @[Valid.scala:141:24] assign io_out_pipe_out_valid = io_out_pipe_v; // @[Valid.scala:135:21, :141:24] reg [64:0] io_out_pipe_b_data; // @[Valid.scala:142:26] assign io_out_pipe_out_bits_data = io_out_pipe_b_data; // @[Valid.scala:135:21, :142:26] reg [4:0] io_out_pipe_b_exc; // @[Valid.scala:142:26] assign io_out_pipe_out_bits_exc = io_out_pipe_b_exc; // @[Valid.scala:135:21, :142:26] assign io_out_valid_0 = io_out_pipe_out_valid; // @[Valid.scala:135:21] assign io_out_bits_data_0 = io_out_pipe_out_bits_data; // @[Valid.scala:135:21] assign io_out_bits_exc_0 = io_out_pipe_out_bits_exc; // @[Valid.scala:135:21] always @(posedge clock) begin // @[FPU.scala:697:7] valid <= io_in_valid_0; // @[FPU.scala:697:7, :707:22] if (io_in_valid_0) begin // @[FPU.scala:697:7] in_ldst <= io_in_bits_ldst_0; // @[FPU.scala:697:7, :708:15] in_wen <= io_in_bits_wen_0; // @[FPU.scala:697:7, :708:15] in_ren1 <= io_in_bits_ren1_0; // @[FPU.scala:697:7, :708:15] in_ren2 <= io_in_bits_ren2_0; // @[FPU.scala:697:7, :708:15] in_ren3 <= io_in_bits_ren3_0; // @[FPU.scala:697:7, :708:15] in_swap12 <= io_in_bits_swap12_0; // @[FPU.scala:697:7, :708:15] in_swap23 <= io_in_bits_swap23_0; // @[FPU.scala:697:7, :708:15] in_typeTagIn <= io_in_bits_typeTagIn_0; // @[FPU.scala:697:7, :708:15] in_typeTagOut <= io_in_bits_typeTagOut_0; // @[FPU.scala:697:7, :708:15] in_fromint <= io_in_bits_fromint_0; // @[FPU.scala:697:7, :708:15] in_toint <= io_in_bits_toint_0; // @[FPU.scala:697:7, :708:15] in_fastpipe <= io_in_bits_fastpipe_0; // @[FPU.scala:697:7, :708:15] in_fma <= io_in_bits_fma_0; // @[FPU.scala:697:7, :708:15] in_div <= io_in_bits_div_0; // @[FPU.scala:697:7, :708:15] in_sqrt <= io_in_bits_sqrt_0; // @[FPU.scala:697:7, :708:15] in_wflags <= io_in_bits_wflags_0; // @[FPU.scala:697:7, :708:15] in_rm <= io_in_bits_rm_0; // @[FPU.scala:697:7, :708:15] in_fmaCmd <= io_in_bits_fmaCmd_0; // @[FPU.scala:697:7, :708:15] in_typ <= io_in_bits_typ_0; // @[FPU.scala:697:7, :708:15] in_fmt <= io_in_bits_fmt_0; // @[FPU.scala:697:7, :708:15] in_in1 <= io_in_bits_in1_0; // @[FPU.scala:697:7, :708:15] in_in2 <= io_in_bits_swap23_0 ? 
65'h80000000 : io_in_bits_in2_0; // @[FPU.scala:697:7, :708:15, :714:8, :715:{23,32}] in_in3 <= io_in_bits_ren3_0 | io_in_bits_swap23_0 ? io_in_bits_in3_0 : zero; // @[FPU.scala:697:7, :708:15, :711:50, :714:8, :716:{21,37,46}] end if (_fma_io_validout) begin // @[FPU.scala:719:19] io_out_pipe_b_data <= res_data; // @[Valid.scala:142:26] io_out_pipe_b_exc <= res_exc; // @[Valid.scala:142:26] end if (reset) // @[FPU.scala:697:7] io_out_pipe_v <= 1'h0; // @[Valid.scala:141:24] else // @[FPU.scala:697:7] io_out_pipe_v <= _fma_io_validout; // @[Valid.scala:141:24] end // @[FPU.scala:697:7] MulAddRecFNPipe_l2_e8_s24_3 fma ( // @[FPU.scala:719:19] .clock (clock), .reset (reset), .io_validin (valid), // @[FPU.scala:707:22] .io_op (in_fmaCmd), // @[FPU.scala:708:15] .io_a (in_in1[32:0]), // @[FPU.scala:708:15, :724:12] .io_b (in_in2[32:0]), // @[FPU.scala:708:15, :725:12] .io_c (in_in3[32:0]), // @[FPU.scala:708:15, :726:12] .io_roundingMode (in_rm), // @[FPU.scala:708:15] .io_out (_fma_io_out), .io_exceptionFlags (res_exc), .io_validout (_fma_io_validout) ); // @[FPU.scala:719:19] assign io_out_valid = io_out_valid_0; // @[FPU.scala:697:7] assign io_out_bits_data = io_out_bits_data_0; // @[FPU.scala:697:7] assign io_out_bits_exc = io_out_bits_exc_0; // @[FPU.scala:697:7] endmodule
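The FPUFMAPipe instance above registers its operands with the add/sub and multiply special cases already resolved, following the FPUFMAPipe logic in the FPU.scala source of the prompt (one, zero, cmd_fma, cmd_addsub). Below is a minimal stand-alone sketch of just that operand preparation for the 33-bit recoded single-precision case (exp = 8, sig = 24); the module and port names are invented for illustration and the widths are simplified relative to the 65-bit FPInput.

import chisel3._

// Hedged sketch of FMA operand preparation, not the rocket-chip source.
class FmaOperandPrep extends Module {
  val io = IO(new Bundle {
    val in1    = Input(UInt(33.W)) // recoded operands: exp + sig + 1 = 33 bits
    val in2    = Input(UInt(33.W))
    val in3    = Input(UInt(33.W))
    val ren3   = Input(Bool())     // true for fmadd/fmsub/fnmadd/fnmsub
    val swap23 = Input(Bool())     // true for fadd/fsub
    val out2   = Output(UInt(33.W))
    val out3   = Output(UInt(33.W))
  })
  // Recoded +1.0 for exp=8, sig=24: bit (sig + exp - 1) = bit 31, i.e. 32'h80000000.
  val one = 1.U << 31
  // Recoded signed zero whose sign is the XOR of the product's sign bits, so a
  // pure multiply computes in1 * in2 + (+/-0.0) without perturbing the result.
  val zero = (io.in1 ^ io.in2) & (1.U << 32)
  io.out2 := Mux(io.swap23, one, io.in2)                 // fadd/fsub: multiplier's b operand is 1.0
  io.out3 := Mux(io.ren3 || io.swap23, io.in3, zero)     // fmul: addend is signed zero
}

This matches the registered updates in the Verilog above: in_in2 is forced to 65'h80000000 when swap23 is set, and in_in3 falls back to the signed zero unless the request is a fused op (ren3) or an add/sub (swap23).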
Generate the Verilog code corresponding to the following Chisel files. File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. 
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. 
* * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. 
It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, 
DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. 
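 * (that is, the [[BaseNode]]s bound directly to this node with the `:*=*` operator, i.e. with [[BIND_FLEX]]).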
*/ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). 
[[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... // Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. 
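 *
 * A hypothetical illustration of where the `(start, end)` ranges queried below come from (plain
 * Scala with invented counts, not code from this file): if a node's bindings resolve to 1, 2 and 1
 * edges respectively, then
 * {{{
 * val counts = Seq(1, 2, 1)
 * val sums   = counts.scanLeft(0)(_ + _)  // Seq(0, 1, 3, 4): running edge totals
 * val ranges = sums.init.zip(sums.tail)   // Seq((0,1), (1,3), (3,4)): one (start, end) per binding
 * }}}
 * which is the same `scanLeft`/`init`/`tail` pattern used to build [[oPortMapping]] and [[iPortMapping]] above.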
*/ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. 
|$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. */ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. 
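 *
 * @example
 *   An illustrative pattern (the node and val names are invented for this sketch) from inside a
 *   [[LazyModuleImp]] body:
 *   {{{
 *   lazy val module = new LazyModuleImp(this) {
 *     myNode.in.foreach { case (bundle, edge) =>
 *       // `bundle` is the negotiated hardware interface; `edge` carries the negotiated parameters
 *     }
 *   }
 *   }}}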
*/ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. */ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File Xbar.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ class IntXbar()(implicit p: Parameters) extends LazyModule { val intnode = new IntNexusNode( sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) }, sourceFn = { seq => IntSourcePortParameters((seq zip seq.map(_.num).scanLeft(0)(_+_).init).map { case (s, o) => s.sources.map(z => z.copy(range = z.range.offset(o))) }.flatten) }) { override def circuitIdentity = outputs == 1 && inputs == 1 } lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { override def desiredName = s"IntXbar_i${intnode.in.size}_o${intnode.out.size}" val cat = intnode.in.map { case (i, e) => i.take(e.source.num) }.flatten intnode.out.foreach { case (o, _) => o := cat } } } class IntSyncXbar()(implicit p: Parameters) extends LazyModule { val intnode = new IntSyncNexusNode( sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) }, sourceFn = { seq => IntSourcePortParameters((seq zip seq.map(_.num).scanLeft(0)(_+_).init).map { case (s, o) => s.sources.map(z => z.copy(range = z.range.offset(o))) }.flatten) }) { override def circuitIdentity = outputs == 1 && inputs == 1 } lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = s"IntSyncXbar_i${intnode.in.size}_o${intnode.out.size}" val cat = intnode.in.map { case (i, e) => i.sync.take(e.source.num) }.flatten intnode.out.foreach { case (o, _) => o.sync := cat } } } object IntXbar { def apply()(implicit p: Parameters): IntNode = { val xbar = LazyModule(new IntXbar) xbar.intnode } } object IntSyncXbar { def apply()(implicit p: Parameters): IntSyncNode = { val xbar = LazyModule(new IntSyncXbar) xbar.intnode } }
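The IntXbar above is a nexus: every inward interrupt port is concatenated onto each outward port, with each port's source IDs shifted by the running offsets computed in `sourceFn`. A minimal wiring sketch, with all instance names invented and assumed to sit inside a LazyModule body with an implicit Parameters in scope:
// Hypothetical wiring only; uartLike, spiLike and plicLike stand in for real devices.
val xbarNode = IntXbar()          // instantiates an IntXbar LazyModule and returns its nexus node
xbarNode := uartLike.intnode      // first inward binding: its sources keep offset 0
xbarNode := spiLike.intnode       // second inward binding: its sources are offset past uartLike's
plicLike.intnode := xbarNode      // the single outward binding sees the fused interrupt vector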
module IntXbar_i2_o1_2( // @[Xbar.scala:22:9] input auto_anon_in_1_0, // @[LazyModuleImp.scala:107:25] input auto_anon_in_1_1, // @[LazyModuleImp.scala:107:25] input auto_anon_in_1_2, // @[LazyModuleImp.scala:107:25] input auto_anon_in_1_3, // @[LazyModuleImp.scala:107:25] input auto_anon_in_1_4, // @[LazyModuleImp.scala:107:25] input auto_anon_in_0_0, // @[LazyModuleImp.scala:107:25] input auto_anon_in_0_1, // @[LazyModuleImp.scala:107:25] input auto_anon_in_0_2, // @[LazyModuleImp.scala:107:25] input auto_anon_in_0_3, // @[LazyModuleImp.scala:107:25] input auto_anon_in_0_4, // @[LazyModuleImp.scala:107:25] output auto_anon_out_0, // @[LazyModuleImp.scala:107:25] output auto_anon_out_1, // @[LazyModuleImp.scala:107:25] output auto_anon_out_2, // @[LazyModuleImp.scala:107:25] output auto_anon_out_3, // @[LazyModuleImp.scala:107:25] output auto_anon_out_4, // @[LazyModuleImp.scala:107:25] output auto_anon_out_5, // @[LazyModuleImp.scala:107:25] output auto_anon_out_6, // @[LazyModuleImp.scala:107:25] output auto_anon_out_7, // @[LazyModuleImp.scala:107:25] output auto_anon_out_8, // @[LazyModuleImp.scala:107:25] output auto_anon_out_9 // @[LazyModuleImp.scala:107:25] ); wire auto_anon_in_1_0_0 = auto_anon_in_1_0; // @[Xbar.scala:22:9] wire auto_anon_in_1_1_0 = auto_anon_in_1_1; // @[Xbar.scala:22:9] wire auto_anon_in_1_2_0 = auto_anon_in_1_2; // @[Xbar.scala:22:9] wire auto_anon_in_1_3_0 = auto_anon_in_1_3; // @[Xbar.scala:22:9] wire auto_anon_in_1_4_0 = auto_anon_in_1_4; // @[Xbar.scala:22:9] wire auto_anon_in_0_0_0 = auto_anon_in_0_0; // @[Xbar.scala:22:9] wire auto_anon_in_0_1_0 = auto_anon_in_0_1; // @[Xbar.scala:22:9] wire auto_anon_in_0_2_0 = auto_anon_in_0_2; // @[Xbar.scala:22:9] wire auto_anon_in_0_3_0 = auto_anon_in_0_3; // @[Xbar.scala:22:9] wire auto_anon_in_0_4_0 = auto_anon_in_0_4; // @[Xbar.scala:22:9] wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire anonIn_1_0 = auto_anon_in_1_0_0; // @[Xbar.scala:22:9] wire anonIn_1_1 = auto_anon_in_1_1_0; // @[Xbar.scala:22:9] wire anonIn_1_2 = auto_anon_in_1_2_0; // @[Xbar.scala:22:9] wire anonIn_1_3 = auto_anon_in_1_3_0; // @[Xbar.scala:22:9] wire anonIn_1_4 = auto_anon_in_1_4_0; // @[Xbar.scala:22:9] wire anonIn_0 = auto_anon_in_0_0_0; // @[Xbar.scala:22:9] wire anonIn_1 = auto_anon_in_0_1_0; // @[Xbar.scala:22:9] wire anonIn_2 = auto_anon_in_0_2_0; // @[Xbar.scala:22:9] wire anonIn_3 = auto_anon_in_0_3_0; // @[Xbar.scala:22:9] wire anonIn_4 = auto_anon_in_0_4_0; // @[Xbar.scala:22:9] wire anonOut_0; // @[MixedNode.scala:542:17] wire anonOut_1; // @[MixedNode.scala:542:17] wire anonOut_2; // @[MixedNode.scala:542:17] wire anonOut_3; // @[MixedNode.scala:542:17] wire anonOut_4; // @[MixedNode.scala:542:17] wire anonOut_5; // @[MixedNode.scala:542:17] wire anonOut_6; // @[MixedNode.scala:542:17] wire anonOut_7; // @[MixedNode.scala:542:17] wire anonOut_8; // @[MixedNode.scala:542:17] wire anonOut_9; // @[MixedNode.scala:542:17] wire auto_anon_out_0_0; // @[Xbar.scala:22:9] wire auto_anon_out_1_0; // @[Xbar.scala:22:9] wire auto_anon_out_2_0; // @[Xbar.scala:22:9] wire auto_anon_out_3_0; // @[Xbar.scala:22:9] wire auto_anon_out_4_0; // @[Xbar.scala:22:9] wire auto_anon_out_5_0; // @[Xbar.scala:22:9] wire auto_anon_out_6_0; // @[Xbar.scala:22:9] wire auto_anon_out_7_0; // @[Xbar.scala:22:9] wire auto_anon_out_8_0; // @[Xbar.scala:22:9] wire auto_anon_out_9_0; // @[Xbar.scala:22:9] assign anonOut_0 = anonIn_0; // 
@[MixedNode.scala:542:17, :551:17] assign anonOut_1 = anonIn_1; // @[MixedNode.scala:542:17, :551:17] assign anonOut_2 = anonIn_2; // @[MixedNode.scala:542:17, :551:17] assign anonOut_3 = anonIn_3; // @[MixedNode.scala:542:17, :551:17] assign anonOut_4 = anonIn_4; // @[MixedNode.scala:542:17, :551:17] assign anonOut_5 = anonIn_1_0; // @[MixedNode.scala:542:17, :551:17] assign anonOut_6 = anonIn_1_1; // @[MixedNode.scala:542:17, :551:17] assign anonOut_7 = anonIn_1_2; // @[MixedNode.scala:542:17, :551:17] assign anonOut_8 = anonIn_1_3; // @[MixedNode.scala:542:17, :551:17] assign anonOut_9 = anonIn_1_4; // @[MixedNode.scala:542:17, :551:17] assign auto_anon_out_0_0 = anonOut_0; // @[Xbar.scala:22:9] assign auto_anon_out_1_0 = anonOut_1; // @[Xbar.scala:22:9] assign auto_anon_out_2_0 = anonOut_2; // @[Xbar.scala:22:9] assign auto_anon_out_3_0 = anonOut_3; // @[Xbar.scala:22:9] assign auto_anon_out_4_0 = anonOut_4; // @[Xbar.scala:22:9] assign auto_anon_out_5_0 = anonOut_5; // @[Xbar.scala:22:9] assign auto_anon_out_6_0 = anonOut_6; // @[Xbar.scala:22:9] assign auto_anon_out_7_0 = anonOut_7; // @[Xbar.scala:22:9] assign auto_anon_out_8_0 = anonOut_8; // @[Xbar.scala:22:9] assign auto_anon_out_9_0 = anonOut_9; // @[Xbar.scala:22:9] assign auto_anon_out_0 = auto_anon_out_0_0; // @[Xbar.scala:22:9] assign auto_anon_out_1 = auto_anon_out_1_0; // @[Xbar.scala:22:9] assign auto_anon_out_2 = auto_anon_out_2_0; // @[Xbar.scala:22:9] assign auto_anon_out_3 = auto_anon_out_3_0; // @[Xbar.scala:22:9] assign auto_anon_out_4 = auto_anon_out_4_0; // @[Xbar.scala:22:9] assign auto_anon_out_5 = auto_anon_out_5_0; // @[Xbar.scala:22:9] assign auto_anon_out_6 = auto_anon_out_6_0; // @[Xbar.scala:22:9] assign auto_anon_out_7 = auto_anon_out_7_0; // @[Xbar.scala:22:9] assign auto_anon_out_8 = auto_anon_out_8_0; // @[Xbar.scala:22:9] assign auto_anon_out_9 = auto_anon_out_9_0; // @[Xbar.scala:22:9] endmodule
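Note the correspondence with the Chisel Impl above: the module is purely combinational fan-through, and because `cat` flattens the inward ports in order, outputs 0 through 4 mirror auto_anon_in_0_0..auto_anon_in_0_4 while outputs 5 through 9 mirror auto_anon_in_1_0..auto_anon_in_1_4. A plain-Scala sketch of that ordering (illustrative names only):
val port0 = Seq.tabulate(5)(i => s"in_0_$i")
val port1 = Seq.tabulate(5)(i => s"in_1_$i")
val cat   = Seq(port0, port1).flatten   // in_0_0 .. in_0_4, then in_1_0 .. in_1_4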
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
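// Each `when` block below is gated on one A-channel opcode and checks both ends of the edge:
// the master must be able to emit the message (edge.master.emits*) and the slave must be able to
// accept it at that address and size (edge.slave.supports*Safe), alongside the source, alignment,
// param, mask, and corrupt checks required for that opcode.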
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
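// Descriptive note on the bookkeeping here: outstanding A=>D transactions are tracked per source ID.
// `inflight` is a bitmap that is set on the first beat of an accepted 'A' request and cleared on the
// first beat of the matching 'D' response (ReleaseAck excluded). `inflight_opcodes` and
// `inflight_sizes` store (opcode << 1) | 1 and (size << 1) | 1 in a per-source field, so an all-zero
// field means "no request outstanding", and the a_opcode_lookup / a_size_lookup expressions above
// shift the stored field right by one to recover the saved opcode and size for the response checks.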
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, 
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
module TLMonitor_15( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [2:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [6:0] source; // @[Monitor.scala:390:22] reg [28:0] address; // @[Monitor.scala:391:22] reg [2:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [6:0] source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [64:0] inflight; // @[Monitor.scala:614:27] reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [259:0] inflight_sizes; // @[Monitor.scala:618:33] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire [127:0] _GEN_0 = {121'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [127:0] _GEN_3 = {121'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [64:0] inflight_1; // @[Monitor.scala:726:35] reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
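The TLEdge helpers above (hasData, firstlast, numBeats1) are what monitors such as TLMonitor_15 lean on to track message boundaries. Below is a minimal, hypothetical sketch of the same pattern in user code: the module name ABeatTracker and its ports are illustrative, and it assumes the usual chisel3 and freechips.rocketchip.tilelink imports for TLEdge and TLBundleA.

import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._

class ABeatTracker(edge: TLEdge) extends Module {
  val io = IO(new Bundle {
    val a        = Flipped(Decoupled(new TLBundleA(edge.bundle)))
    val in_burst = Output(Bool())
  })
  io.a.ready := true.B
  // firstlast internally builds the same beat counter as firstlastHelper above
  val (first, last, done) = edge.firstlast(io.a)
  val busy = RegInit(false.B)
  // A multi-beat request starts when a data-carrying first beat fires and is not also the last beat
  when (io.a.fire && first && edge.hasData(io.a.bits) && !last) { busy := true.B }
  when (done) { busy := false.B }
  io.in_burst := busy
}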
Generate the Verilog code corresponding to the following Chisel files. File Tile.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ import Util._ /** * A Tile is a purely combinational 2D array of passThrough PEs. * a, b, s, and in_propag are broadcast across the entire array and are passed through to the Tile's outputs * @param width The data width of each PE in bits * @param rows Number of PEs on each row * @param columns Number of PEs on each column */ class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module { val io = IO(new Bundle { val in_a = Input(Vec(rows, inputType)) val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it val in_d = Input(Vec(columns, outputType)) val in_control = Input(Vec(columns, new PEControl(accType))) val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val in_last = Input(Vec(columns, Bool())) val out_a = Output(Vec(rows, inputType)) val out_c = Output(Vec(columns, outputType)) val out_b = Output(Vec(columns, outputType)) val out_control = Output(Vec(columns, new PEControl(accType))) val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val out_last = Output(Vec(columns, Bool())) val in_valid = Input(Vec(columns, Bool())) val out_valid = Output(Vec(columns, Bool())) val bad_dataflow = Output(Bool()) }) import ev._ val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls))) val tileT = tile.transpose // TODO: abstract hori/vert broadcast, all these connections look the same // Broadcast 'a' horizontally across the Tile for (r <- 0 until rows) { tile(r).foldLeft(io.in_a(r)) { case (in_a, pe) => pe.io.in_a := in_a pe.io.out_a } } // Broadcast 'b' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_b(c)) { case (in_b, pe) => pe.io.in_b := (if (tree_reduction) in_b.zero else in_b) pe.io.out_b } } // Broadcast 'd' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_d(c)) { case (in_d, pe) => pe.io.in_d := in_d pe.io.out_c } } // Broadcast 'control' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_control(c)) { case (in_ctrl, pe) => pe.io.in_control := in_ctrl pe.io.out_control } } // Broadcast 'garbage' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_valid(c)) { case (v, pe) => pe.io.in_valid := v pe.io.out_valid } } // Broadcast 'id' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_id(c)) { case (id, pe) => pe.io.in_id := id pe.io.out_id } } // Broadcast 'last' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_last(c)) { case (last, pe) => pe.io.in_last := last pe.io.out_last } } // Drive the Tile's bottom IO for (c <- 0 until columns) { io.out_c(c) := tile(rows-1)(c).io.out_c io.out_control(c) := tile(rows-1)(c).io.out_control io.out_id(c) := tile(rows-1)(c).io.out_id io.out_last(c) := tile(rows-1)(c).io.out_last io.out_valid(c) := tile(rows-1)(c).io.out_valid io.out_b(c) := { if (tree_reduction) { val prods = tileT(c).map(_.io.out_b) accumulateTree(prods :+ io.in_b(c)) } else { tile(rows - 1)(c).io.out_b } } } io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_) // Drive the Tile's right IO for (r <- 0 until rows) { io.out_a(r) 
:= tile(r)(columns-1).io.out_a } }
module Tile_242( // @[Tile.scala:16:7] input clock, // @[Tile.scala:16:7] input reset, // @[Tile.scala:16:7] input [7:0] io_in_a_0, // @[Tile.scala:17:14] input [19:0] io_in_b_0, // @[Tile.scala:17:14] input [19:0] io_in_d_0, // @[Tile.scala:17:14] input io_in_control_0_dataflow, // @[Tile.scala:17:14] input io_in_control_0_propagate, // @[Tile.scala:17:14] input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14] input [2:0] io_in_id_0, // @[Tile.scala:17:14] input io_in_last_0, // @[Tile.scala:17:14] output [7:0] io_out_a_0, // @[Tile.scala:17:14] output [19:0] io_out_c_0, // @[Tile.scala:17:14] output [19:0] io_out_b_0, // @[Tile.scala:17:14] output io_out_control_0_dataflow, // @[Tile.scala:17:14] output io_out_control_0_propagate, // @[Tile.scala:17:14] output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14] output [2:0] io_out_id_0, // @[Tile.scala:17:14] output io_out_last_0, // @[Tile.scala:17:14] input io_in_valid_0, // @[Tile.scala:17:14] output io_out_valid_0, // @[Tile.scala:17:14] output io_bad_dataflow // @[Tile.scala:17:14] ); wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7] wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7] wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7] wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7] wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7] wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7] wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7] wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7] wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7] wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7] wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7] wire io_out_control_0_propagate_0; // @[Tile.scala:16:7] wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7] wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7] wire io_out_last_0_0; // @[Tile.scala:16:7] wire io_out_valid_0_0; // @[Tile.scala:16:7] wire io_bad_dataflow_0; // @[Tile.scala:16:7] PE_498 tile_0_0 ( // @[Tile.scala:42:44] .clock (clock), .reset (reset), .io_in_a (io_in_a_0_0), // @[Tile.scala:16:7] .io_in_b (io_in_b_0_0), // @[Tile.scala:16:7] .io_in_d (io_in_d_0_0), // @[Tile.scala:16:7] .io_out_a (io_out_a_0_0), .io_out_b (io_out_b_0_0), .io_out_c (io_out_c_0_0), .io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7] .io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7] .io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7] .io_out_control_dataflow (io_out_control_0_dataflow_0), .io_out_control_propagate (io_out_control_0_propagate_0), .io_out_control_shift (io_out_control_0_shift_0), .io_in_id (io_in_id_0_0), // @[Tile.scala:16:7] .io_out_id (io_out_id_0_0), .io_in_last (io_in_last_0_0), // @[Tile.scala:16:7] .io_out_last (io_out_last_0_0), .io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7] .io_out_valid (io_out_valid_0_0), .io_bad_dataflow (io_bad_dataflow_0) ); // @[Tile.scala:42:44] assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7] assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7] assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7] assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7] assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7] assign io_out_control_0_shift = io_out_control_0_shift_0; // 
@[Tile.scala:16:7] assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7] assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7] assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7] assign io_bad_dataflow = io_bad_dataflow_0; // @[Tile.scala:16:7] endmodule
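The Tile above wires its PEs with a repeated foldLeft: the accumulator is the signal to drive into the next PE, and each step returns that PE's output as the next accumulator, so the last value left over is the Tile-edge output. As a minimal sketch of the same idiom outside gemmini (PassThrough and ChainExample are made-up names, not part of the source above):

import chisel3._

class PassThrough extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  io.out := RegNext(io.in)
}

class ChainExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  val stages = Seq.fill(4)(Module(new PassThrough))
  // Same shape as the Tile broadcasts: thread io.in through the chain and keep the last output
  io.out := stages.foldLeft(io.in) { case (prev, stage) =>
    stage.io.in := prev
    stage.io.out
  }
}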
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_107( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_128 io_out_sink_valid ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
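AsyncValidSync_107 is one of the small synchronizer blocks that the safe handshake of AsyncQueueSource/AsyncQueueSink instantiates. At the user level the crossing is normally driven through the ToAsyncBundle/FromAsyncBundle helpers from the same file; the sketch below shows that usage under the assumption of a RawModule wrapper whose port names (enq_clock, deq_clock, ...) are made up for illustration.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util._

class AsyncFifoExample extends RawModule {
  val enq_clock = IO(Input(Clock()))
  val enq_reset = IO(Input(Bool()))
  val deq_clock = IO(Input(Clock()))
  val deq_reset = IO(Input(Bool()))
  val enq       = IO(Flipped(Decoupled(UInt(8.W))))
  val deq       = IO(Decoupled(UInt(8.W)))

  // Source half lives in the enqueue domain, sink half in the dequeue domain
  val async = withClockAndReset(enq_clock, enq_reset) {
    ToAsyncBundle(enq, AsyncQueueParams(depth = 8, sync = 3))
  }
  withClockAndReset(deq_clock, deq_reset) {
    deq <> FromAsyncBundle(async)
  }
}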
Generate the Verilog code corresponding to the following Chisel files. File Repeater.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{Decoupled, DecoupledIO} // A Repeater passes its input to its output, unless repeat is asserted. // When repeat is asserted, the Repeater copies the input and repeats it next cycle. class Repeater[T <: Data](gen: T) extends Module { override def desiredName = s"Repeater_${gen.typeName}" val io = IO( new Bundle { val repeat = Input(Bool()) val full = Output(Bool()) val enq = Flipped(Decoupled(gen.cloneType)) val deq = Decoupled(gen.cloneType) } ) val full = RegInit(false.B) val saved = Reg(gen.cloneType) // When !full, a repeater is pass-through io.deq.valid := io.enq.valid || full io.enq.ready := io.deq.ready && !full io.deq.bits := Mux(full, saved, io.enq.bits) io.full := full when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits } when (io.deq.fire && !io.repeat) { full := false.B } } object Repeater { def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = { val repeater = Module(new Repeater(chiselTypeOf(enq.bits))) repeater.io.repeat := repeat repeater.io.enq <> enq repeater.io.deq } }
module Repeater_TLBundleA_a29d64s7k1z3u( // @[Repeater.scala:10:7] input clock, // @[Repeater.scala:10:7] input reset, // @[Repeater.scala:10:7] input io_repeat, // @[Repeater.scala:13:14] output io_full, // @[Repeater.scala:13:14] output io_enq_ready, // @[Repeater.scala:13:14] input io_enq_valid, // @[Repeater.scala:13:14] input [2:0] io_enq_bits_opcode, // @[Repeater.scala:13:14] input [2:0] io_enq_bits_param, // @[Repeater.scala:13:14] input [2:0] io_enq_bits_size, // @[Repeater.scala:13:14] input [6:0] io_enq_bits_source, // @[Repeater.scala:13:14] input [28:0] io_enq_bits_address, // @[Repeater.scala:13:14] input [7:0] io_enq_bits_mask, // @[Repeater.scala:13:14] input [63:0] io_enq_bits_data, // @[Repeater.scala:13:14] input io_enq_bits_corrupt, // @[Repeater.scala:13:14] input io_deq_ready, // @[Repeater.scala:13:14] output io_deq_valid, // @[Repeater.scala:13:14] output [2:0] io_deq_bits_opcode, // @[Repeater.scala:13:14] output [2:0] io_deq_bits_param, // @[Repeater.scala:13:14] output [2:0] io_deq_bits_size, // @[Repeater.scala:13:14] output [6:0] io_deq_bits_source, // @[Repeater.scala:13:14] output [28:0] io_deq_bits_address, // @[Repeater.scala:13:14] output [7:0] io_deq_bits_mask, // @[Repeater.scala:13:14] output io_deq_bits_corrupt // @[Repeater.scala:13:14] ); wire io_repeat_0 = io_repeat; // @[Repeater.scala:10:7] wire io_enq_valid_0 = io_enq_valid; // @[Repeater.scala:10:7] wire [2:0] io_enq_bits_opcode_0 = io_enq_bits_opcode; // @[Repeater.scala:10:7] wire [2:0] io_enq_bits_param_0 = io_enq_bits_param; // @[Repeater.scala:10:7] wire [2:0] io_enq_bits_size_0 = io_enq_bits_size; // @[Repeater.scala:10:7] wire [6:0] io_enq_bits_source_0 = io_enq_bits_source; // @[Repeater.scala:10:7] wire [28:0] io_enq_bits_address_0 = io_enq_bits_address; // @[Repeater.scala:10:7] wire [7:0] io_enq_bits_mask_0 = io_enq_bits_mask; // @[Repeater.scala:10:7] wire [63:0] io_enq_bits_data_0 = io_enq_bits_data; // @[Repeater.scala:10:7] wire io_enq_bits_corrupt_0 = io_enq_bits_corrupt; // @[Repeater.scala:10:7] wire io_deq_ready_0 = io_deq_ready; // @[Repeater.scala:10:7] wire _io_enq_ready_T_1; // @[Repeater.scala:25:32] wire _io_deq_valid_T; // @[Repeater.scala:24:32] wire [2:0] _io_deq_bits_T_opcode; // @[Repeater.scala:26:21] wire [2:0] _io_deq_bits_T_param; // @[Repeater.scala:26:21] wire [2:0] _io_deq_bits_T_size; // @[Repeater.scala:26:21] wire [6:0] _io_deq_bits_T_source; // @[Repeater.scala:26:21] wire [28:0] _io_deq_bits_T_address; // @[Repeater.scala:26:21] wire [7:0] _io_deq_bits_T_mask; // @[Repeater.scala:26:21] wire [63:0] _io_deq_bits_T_data; // @[Repeater.scala:26:21] wire _io_deq_bits_T_corrupt; // @[Repeater.scala:26:21] wire io_enq_ready_0; // @[Repeater.scala:10:7] wire [2:0] io_deq_bits_opcode_0; // @[Repeater.scala:10:7] wire [2:0] io_deq_bits_param_0; // @[Repeater.scala:10:7] wire [2:0] io_deq_bits_size_0; // @[Repeater.scala:10:7] wire [6:0] io_deq_bits_source_0; // @[Repeater.scala:10:7] wire [28:0] io_deq_bits_address_0; // @[Repeater.scala:10:7] wire [7:0] io_deq_bits_mask_0; // @[Repeater.scala:10:7] wire [63:0] io_deq_bits_data; // @[Repeater.scala:10:7] wire io_deq_bits_corrupt_0; // @[Repeater.scala:10:7] wire io_deq_valid_0; // @[Repeater.scala:10:7] wire io_full_0; // @[Repeater.scala:10:7] reg full; // @[Repeater.scala:20:21] assign io_full_0 = full; // @[Repeater.scala:10:7, :20:21] reg [2:0] saved_opcode; // @[Repeater.scala:21:18] reg [2:0] saved_param; // @[Repeater.scala:21:18] reg [2:0] saved_size; // @[Repeater.scala:21:18] reg [6:0] saved_source; // 
@[Repeater.scala:21:18] reg [28:0] saved_address; // @[Repeater.scala:21:18] reg [7:0] saved_mask; // @[Repeater.scala:21:18] reg [63:0] saved_data; // @[Repeater.scala:21:18] reg saved_corrupt; // @[Repeater.scala:21:18] assign _io_deq_valid_T = io_enq_valid_0 | full; // @[Repeater.scala:10:7, :20:21, :24:32] assign io_deq_valid_0 = _io_deq_valid_T; // @[Repeater.scala:10:7, :24:32] wire _io_enq_ready_T = ~full; // @[Repeater.scala:20:21, :25:35] assign _io_enq_ready_T_1 = io_deq_ready_0 & _io_enq_ready_T; // @[Repeater.scala:10:7, :25:{32,35}] assign io_enq_ready_0 = _io_enq_ready_T_1; // @[Repeater.scala:10:7, :25:32] assign _io_deq_bits_T_opcode = full ? saved_opcode : io_enq_bits_opcode_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_param = full ? saved_param : io_enq_bits_param_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_size = full ? saved_size : io_enq_bits_size_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_source = full ? saved_source : io_enq_bits_source_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_address = full ? saved_address : io_enq_bits_address_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_mask = full ? saved_mask : io_enq_bits_mask_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_data = full ? saved_data : io_enq_bits_data_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_corrupt = full ? saved_corrupt : io_enq_bits_corrupt_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign io_deq_bits_opcode_0 = _io_deq_bits_T_opcode; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_param_0 = _io_deq_bits_T_param; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_size_0 = _io_deq_bits_T_size; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_source_0 = _io_deq_bits_T_source; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_address_0 = _io_deq_bits_T_address; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_mask_0 = _io_deq_bits_T_mask; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_data = _io_deq_bits_T_data; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_corrupt_0 = _io_deq_bits_T_corrupt; // @[Repeater.scala:10:7, :26:21] wire _T_1 = io_enq_ready_0 & io_enq_valid_0 & io_repeat_0; // @[Decoupled.scala:51:35] always @(posedge clock) begin // @[Repeater.scala:10:7] if (reset) // @[Repeater.scala:10:7] full <= 1'h0; // @[Repeater.scala:20:21] else // @[Repeater.scala:10:7] full <= ~(io_deq_ready_0 & io_deq_valid_0 & ~io_repeat_0) & (_T_1 | full); // @[Decoupled.scala:51:35] if (_T_1) begin // @[Decoupled.scala:51:35] saved_opcode <= io_enq_bits_opcode_0; // @[Repeater.scala:10:7, :21:18] saved_param <= io_enq_bits_param_0; // @[Repeater.scala:10:7, :21:18] saved_size <= io_enq_bits_size_0; // @[Repeater.scala:10:7, :21:18] saved_source <= io_enq_bits_source_0; // @[Repeater.scala:10:7, :21:18] saved_address <= io_enq_bits_address_0; // @[Repeater.scala:10:7, :21:18] saved_mask <= io_enq_bits_mask_0; // @[Repeater.scala:10:7, :21:18] saved_data <= io_enq_bits_data_0; // @[Repeater.scala:10:7, :21:18] saved_corrupt <= io_enq_bits_corrupt_0; // @[Repeater.scala:10:7, :21:18] end always @(posedge) assign io_full = io_full_0; // @[Repeater.scala:10:7] assign io_enq_ready = io_enq_ready_0; // @[Repeater.scala:10:7] assign io_deq_valid = io_deq_valid_0; // @[Repeater.scala:10:7] assign io_deq_bits_opcode = io_deq_bits_opcode_0; // @[Repeater.scala:10:7] assign 
io_deq_bits_param = io_deq_bits_param_0; // @[Repeater.scala:10:7] assign io_deq_bits_size = io_deq_bits_size_0; // @[Repeater.scala:10:7] assign io_deq_bits_source = io_deq_bits_source_0; // @[Repeater.scala:10:7] assign io_deq_bits_address = io_deq_bits_address_0; // @[Repeater.scala:10:7] assign io_deq_bits_mask = io_deq_bits_mask_0; // @[Repeater.scala:10:7] assign io_deq_bits_corrupt = io_deq_bits_corrupt_0; // @[Repeater.scala:10:7] endmodule
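A short usage sketch for the Repeater companion object above (the surrounding module and its ports are illustrative): Repeater.apply instantiates the module, drives repeat, and hands back the deq side, so the caller only splices it into a Decoupled path.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util.Repeater

class RepeatOnceExample extends Module {
  val io = IO(new Bundle {
    val in     = Flipped(Decoupled(UInt(16.W)))
    val out    = Decoupled(UInt(16.W))
    val repeat = Input(Bool())
  })
  // While io.repeat is high, the beat accepted from io.in is saved and replayed on io.out
  io.out <> Repeater(io.in, io.repeat)
}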
Generate the Verilog code corresponding to the following Chisel files. File Tile.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ import Util._ /** * A Tile is a purely combinational 2D array of passThrough PEs. * a, b, s, and in_propag are broadcast across the entire array and are passed through to the Tile's outputs * @param width The data width of each PE in bits * @param rows Number of PEs on each row * @param columns Number of PEs on each column */ class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module { val io = IO(new Bundle { val in_a = Input(Vec(rows, inputType)) val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it val in_d = Input(Vec(columns, outputType)) val in_control = Input(Vec(columns, new PEControl(accType))) val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val in_last = Input(Vec(columns, Bool())) val out_a = Output(Vec(rows, inputType)) val out_c = Output(Vec(columns, outputType)) val out_b = Output(Vec(columns, outputType)) val out_control = Output(Vec(columns, new PEControl(accType))) val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val out_last = Output(Vec(columns, Bool())) val in_valid = Input(Vec(columns, Bool())) val out_valid = Output(Vec(columns, Bool())) val bad_dataflow = Output(Bool()) }) import ev._ val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls))) val tileT = tile.transpose // TODO: abstract hori/vert broadcast, all these connections look the same // Broadcast 'a' horizontally across the Tile for (r <- 0 until rows) { tile(r).foldLeft(io.in_a(r)) { case (in_a, pe) => pe.io.in_a := in_a pe.io.out_a } } // Broadcast 'b' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_b(c)) { case (in_b, pe) => pe.io.in_b := (if (tree_reduction) in_b.zero else in_b) pe.io.out_b } } // Broadcast 'd' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_d(c)) { case (in_d, pe) => pe.io.in_d := in_d pe.io.out_c } } // Broadcast 'control' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_control(c)) { case (in_ctrl, pe) => pe.io.in_control := in_ctrl pe.io.out_control } } // Broadcast 'garbage' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_valid(c)) { case (v, pe) => pe.io.in_valid := v pe.io.out_valid } } // Broadcast 'id' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_id(c)) { case (id, pe) => pe.io.in_id := id pe.io.out_id } } // Broadcast 'last' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_last(c)) { case (last, pe) => pe.io.in_last := last pe.io.out_last } } // Drive the Tile's bottom IO for (c <- 0 until columns) { io.out_c(c) := tile(rows-1)(c).io.out_c io.out_control(c) := tile(rows-1)(c).io.out_control io.out_id(c) := tile(rows-1)(c).io.out_id io.out_last(c) := tile(rows-1)(c).io.out_last io.out_valid(c) := tile(rows-1)(c).io.out_valid io.out_b(c) := { if (tree_reduction) { val prods = tileT(c).map(_.io.out_b) accumulateTree(prods :+ io.in_b(c)) } else { tile(rows - 1)(c).io.out_b } } } io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_) // Drive the Tile's right IO for (r <- 0 until rows) { io.out_a(r) 
:= tile(r)(columns-1).io.out_a } }
module Tile_112( // @[Tile.scala:16:7] input clock, // @[Tile.scala:16:7] input reset, // @[Tile.scala:16:7] input [7:0] io_in_a_0, // @[Tile.scala:17:14] input [19:0] io_in_b_0, // @[Tile.scala:17:14] input [19:0] io_in_d_0, // @[Tile.scala:17:14] input io_in_control_0_dataflow, // @[Tile.scala:17:14] input io_in_control_0_propagate, // @[Tile.scala:17:14] input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14] input [2:0] io_in_id_0, // @[Tile.scala:17:14] input io_in_last_0, // @[Tile.scala:17:14] output [7:0] io_out_a_0, // @[Tile.scala:17:14] output [19:0] io_out_c_0, // @[Tile.scala:17:14] output [19:0] io_out_b_0, // @[Tile.scala:17:14] output io_out_control_0_dataflow, // @[Tile.scala:17:14] output io_out_control_0_propagate, // @[Tile.scala:17:14] output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14] output [2:0] io_out_id_0, // @[Tile.scala:17:14] output io_out_last_0, // @[Tile.scala:17:14] input io_in_valid_0, // @[Tile.scala:17:14] output io_out_valid_0 // @[Tile.scala:17:14] ); wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7] wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7] wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7] wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7] wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7] wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7] wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7] wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7] wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7] wire io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44] wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7] wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7] wire io_out_control_0_propagate_0; // @[Tile.scala:16:7] wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7] wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7] wire io_out_last_0_0; // @[Tile.scala:16:7] wire io_out_valid_0_0; // @[Tile.scala:16:7] PE_368 tile_0_0 ( // @[Tile.scala:42:44] .clock (clock), .reset (reset), .io_in_a (io_in_a_0_0), // @[Tile.scala:16:7] .io_in_b (io_in_b_0_0), // @[Tile.scala:16:7] .io_in_d (io_in_d_0_0), // @[Tile.scala:16:7] .io_out_a (io_out_a_0_0), .io_out_b (io_out_b_0_0), .io_out_c (io_out_c_0_0), .io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7] .io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7] .io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7] .io_out_control_dataflow (io_out_control_0_dataflow_0), .io_out_control_propagate (io_out_control_0_propagate_0), .io_out_control_shift (io_out_control_0_shift_0), .io_in_id (io_in_id_0_0), // @[Tile.scala:16:7] .io_out_id (io_out_id_0_0), .io_in_last (io_in_last_0_0), // @[Tile.scala:16:7] .io_out_last (io_out_last_0_0), .io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7] .io_out_valid (io_out_valid_0_0) ); // @[Tile.scala:42:44] assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7] assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7] assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7] assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7] assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7] assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7] assign io_out_id_0 = io_out_id_0_0; // 
@[Tile.scala:16:7] assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7] assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File DescribedSRAM.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3.{Data, SyncReadMem, Vec} import chisel3.util.log2Ceil object DescribedSRAM { def apply[T <: Data]( name: String, desc: String, size: BigInt, // depth data: T ): SyncReadMem[T] = { val mem = SyncReadMem(size, data) mem.suggestName(name) val granWidth = data match { case v: Vec[_] => v.head.getWidth case d => d.getWidth } val uid = 0 Annotated.srams( component = mem, name = name, address_width = log2Ceil(size), data_width = data.getWidth, depth = size, description = desc, write_mask_granularity = granWidth ) mem } }
module cc_banks_4( // @[DescribedSRAM.scala:17:26] input [11:0] RW0_addr, input RW0_en, input RW0_clk, input RW0_wmode, input [63:0] RW0_wdata, output [63:0] RW0_rdata ); cc_banks_0_ext cc_banks_0_ext ( // @[DescribedSRAM.scala:17:26] .RW0_addr (RW0_addr), .RW0_en (RW0_en), .RW0_clk (RW0_clk), .RW0_wmode (RW0_wmode), .RW0_wdata (RW0_wdata), .RW0_rdata (RW0_rdata) ); // @[DescribedSRAM.scala:17:26] endmodule
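The cc_banks_4 wrapper above corresponds to a 4096-entry (12-bit address), 64-bit wide SyncReadMem. A minimal Chisel sketch of requesting such a memory through DescribedSRAM follows; the module name, port names, memory name, and description string are illustrative assumptions, not taken from the generating design.

import chisel3._
import freechips.rocketchip.util.DescribedSRAM

// Illustrative wrapper only: "CacheDataBank", "cc_banks" and the description
// string are assumptions made for the sake of the example.
class CacheDataBank extends Module {
  val io = IO(new Bundle {
    val addr  = Input(UInt(12.W))
    val en    = Input(Bool())
    val write = Input(Bool())
    val wdata = Input(UInt(64.W))
    val rdata = Output(UInt(64.W))
  })
  // 4096-deep x 64-bit memory; DescribedSRAM also emits the SRAM annotation
  // (address width, data width, depth, write-mask granularity) via Annotated.srams.
  val mem = DescribedSRAM(
    name = "cc_banks",
    desc = "cache data bank",
    size = 4096,
    data = UInt(64.W)
  )
  when(io.en && io.write) { mem.write(io.addr, io.wdata) }
  io.rdata := mem.read(io.addr, io.en && !io.write)
}

Whether the read and write accesses are merged into a single RW port, as in the cc_banks_0_ext macro instantiated above, depends on the memory lowering flow used by the backend.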
Generate the Verilog code corresponding to the following Chisel files. File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. 
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. 
* * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. 
It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, 
DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. 
*/ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). 
[[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... // Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. 
*/ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. 
|$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. */ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. 
*/ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. */ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File Xbar.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ class IntXbar()(implicit p: Parameters) extends LazyModule { val intnode = new IntNexusNode( sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) }, sourceFn = { seq => IntSourcePortParameters((seq zip seq.map(_.num).scanLeft(0)(_+_).init).map { case (s, o) => s.sources.map(z => z.copy(range = z.range.offset(o))) }.flatten) }) { override def circuitIdentity = outputs == 1 && inputs == 1 } lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { override def desiredName = s"IntXbar_i${intnode.in.size}_o${intnode.out.size}" val cat = intnode.in.map { case (i, e) => i.take(e.source.num) }.flatten intnode.out.foreach { case (o, _) => o := cat } } } class IntSyncXbar()(implicit p: Parameters) extends LazyModule { val intnode = new IntSyncNexusNode( sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) }, sourceFn = { seq => IntSourcePortParameters((seq zip seq.map(_.num).scanLeft(0)(_+_).init).map { case (s, o) => s.sources.map(z => z.copy(range = z.range.offset(o))) }.flatten) }) { override def circuitIdentity = outputs == 1 && inputs == 1 } lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = s"IntSyncXbar_i${intnode.in.size}_o${intnode.out.size}" val cat = intnode.in.map { case (i, e) => i.sync.take(e.source.num) }.flatten intnode.out.foreach { case (o, _) => o.sync := cat } } } object IntXbar { def apply()(implicit p: Parameters): IntNode = { val xbar = LazyModule(new IntXbar) xbar.intnode } } object IntSyncXbar { def apply()(implicit p: Parameters): IntSyncNode = { val xbar = LazyModule(new IntSyncXbar) xbar.intnode } }
module IntXbar_i2_o1(); // @[Xbar.scala:22:9] wire auto_anon_in_1_0 = 1'h0; // @[Xbar.scala:22:9] wire auto_anon_in_0_0 = 1'h0; // @[Xbar.scala:22:9] wire auto_anon_out_0 = 1'h0; // @[Xbar.scala:22:9] wire auto_anon_out_1 = 1'h0; // @[Xbar.scala:22:9] wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire anonIn_0 = 1'h0; // @[MixedNode.scala:551:17] wire anonIn_1_0 = 1'h0; // @[MixedNode.scala:551:17] wire anonOut_0 = 1'h0; // @[MixedNode.scala:542:17] wire anonOut_1 = 1'h0; // @[MixedNode.scala:542:17] endmodule
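The IntXbar_i2_o1 module above is the elaborated form of an IntXbar nexus with two inward interrupt edges and one outward edge. A hedged Chisel sketch of the kind of diplomatic wiring that produces this shape is shown below; IntSourceNode/IntSinkNode and the simple port helpers are standard rocket-chip interrupt APIs that do not appear in the listing above, and all node and class names here are hypothetical.

import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.interrupts._

// Sketch only: two single-interrupt sources funneled through an IntXbar into one
// sink. The source/sink nodes are placeholders from the interrupts package.
class InterruptFanIn(implicit p: Parameters) extends LazyModule {
  val src0 = IntSourceNode(IntSourcePortSimple(num = 1))
  val src1 = IntSourceNode(IntSourcePortSimple(num = 1))
  val sink = IntSinkNode(IntSinkPortSimple())

  val xbarNode = IntXbar()   // companion-object apply returns the crossbar's nexus node
  xbarNode := src0           // first inward edge
  xbarNode := src1           // second inward edge
  sink := xbarNode           // single outward edge -> elaborates as IntXbar_i2_o1

  lazy val module = new LazyModuleImp(this) { }
}

In the particular instance above the interrupt inputs happen to be tied low, so the concatenation in the crossbar's LazyRawModuleImp constant-folds and only zero-valued wires remain in the emitted Verilog.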
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_536( // @[SynchronizerReg.scala:68:19] input clock, // @[SynchronizerReg.scala:68:19] input reset, // @[SynchronizerReg.scala:68:19] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19] wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19] wire io_q_0; // @[SynchronizerReg.scala:68:19] reg sync_0; // @[SynchronizerReg.scala:51:87] assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19] reg sync_1; // @[SynchronizerReg.scala:51:87] reg sync_2; // @[SynchronizerReg.scala:51:87] always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19] if (reset) begin // @[SynchronizerReg.scala:68:19] sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87] end else begin // @[SynchronizerReg.scala:68:19] sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87] sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87] sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22] end end // always @(posedge, posedge) assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19] endmodule
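The primitive above is the per-bit, 3-deep, zero-initialized chain (d3, i0) that AsyncResetSynchronizerShiftReg elaborates for clock-domain-crossing synchronization. A minimal usage sketch of the helper from the ShiftReg.scala/SynchronizerReg.scala listing, with illustrative module and signal names:

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Illustrative wrapper: synchronize one asynchronous Bool into the local clock
// domain through three async-reset flops (one d3_i0 primitive per bit).
class CdcSync extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())    // signal arriving from another clock domain
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0, name = Some("cdc_sync"))
}

For multi-bit signals the same call instantiates one primitive chain per bit and concatenates the outputs, as in the AsyncResetSynchronizerShiftReg body shown in the listing above.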
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
      monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.ReleaseData) {
      monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
      monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAck) {
      monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
      monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAckData) {
      monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
    }
    when (bundle.opcode === TLMessages.HintAck) {
      monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
      monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
    }
  }

  def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
    assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
    val source_ok = edge.client.contains(bundle.source)
    val sink_ok = bundle.sink < edge.manager.endSinkId.U
    val deny_put_ok = edge.manager.mayDenyPut.B
    val deny_get_ok = edge.manager.mayDenyGet.B
    when (bundle.opcode === TLMessages.ReleaseAck) {
      assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
      assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
    }
    when (bundle.opcode === TLMessages.Grant) {
      assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
      monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
      monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
      monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
      monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
    }
    when (a.fire && a_first) {
      opcode := a.bits.opcode
      param := a.bits.param
      size := a.bits.size
      source := a.bits.source
      address := a.bits.address
    }
  }

  def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
    val b_first = edge.first(b.bits, b.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val address = Reg(UInt())
    when (b.valid && !b_first) {
      monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
      monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
      monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
      monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
    }
    when (b.fire && b_first) {
      opcode := b.bits.opcode
      param := b.bits.param
      size := b.bits.size
      source := b.bits.source
      address := b.bits.address
    }
  }

  def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
    // Symbolic variable
    val sym_source = Wire(UInt(edge.client.endSourceId.W))
    // TODO: Connect sym_source to a fixed value for simulation and to a
    // free wire in formal
    sym_source := 0.U
    // Type casting Int to UInt
    val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
    maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
    val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
    sym_source_d := sym_source
    // These will be constraints for FV setup
    Property(
      MonitorDirection.Monitor,
      (sym_source === sym_source_d),
      "sym_source should remain stable",
      PropertyClass.Default)
    Property(
      MonitorDirection.Monitor,
      (sym_source <= maxSourceId),
      "sym_source should take legal value",
      PropertyClass.Default)
    val my_resp_pend = RegInit(false.B)
    val my_opcode = Reg(UInt())
    val my_size = Reg(UInt())
    val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
    val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
    val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
    val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
    val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
    val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
    when (my_set_resp_pend) {
      my_resp_pend := true.B
    } .elsewhen (my_clr_resp_pend) {
      my_resp_pend := false.B
    }
    when (my_a_first_beat) {
      my_opcode := bundle.a.bits.opcode
      my_size := bundle.a.bits.size
    }
    val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
    val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
    val my_resp_opcode_legal = Wire(Bool())
    when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
    } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
    } .otherwise {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
    }
    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID, for which a response message " +
      "is already pending (not received until current cycle) for a prior request message " +
      "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the " +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the " +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be same as a_size of the corresponding request " +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
      "request message" + extra)
  }

  def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
    val c_first = edge.first(c.bits, c.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val address = Reg(UInt())
    when (c.valid && !c_first) {
      monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
      monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
      monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
      monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
    }
    when (c.fire && c_first) {
      opcode := c.bits.opcode
      param := c.bits.param
      size := c.bits.size
      source := c.bits.source
      address := c.bits.address
    }
  }

  def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
    val d_first = edge.first(d.bits, d.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val sink = Reg(UInt())
    val denied = Reg(Bool())
    when (d.valid && !d_first) {
      assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
      assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
      assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
      assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
    }
    when (d.fire && d_first) {
      opcode := d.bits.opcode
      param := d.bits.param
      size := d.bits.size
      source := d.bits.source
      sink := d.bits.sink
      denied := d.bits.denied
    }
  }

  def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
    legalizeMultibeatA(bundle.a, edge)
    legalizeMultibeatD(bundle.d, edge)
    if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
      legalizeMultibeatB(bundle.b, edge)
      legalizeMultibeatC(bundle.c, edge)
    }
  }

  //This is left in for almond which doesn't adhere to the tilelink protocol
  @deprecated("Use
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
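    // Packed bookkeeping (descriptive note): `inflight` keeps one valid bit per source ID, while
    // `inflight_opcodes` and `inflight_sizes` keep one (1 << log_a_opcode_bus_size)-bit and one
    // (1 << log_a_size_bus_size)-bit slot per source ID. Each slot stores (value << 1) | 1, so an
    // all-zero slot means "nothing outstanding" rather than a legal stored value of zero.
    // e.g. with the 3-bit A opcode (log_a_opcode_bus_size = 2), a Get (opcode 4) from source 2
    // writes (4 << 1) | 1 = 0b1001 into bits [11:8] of inflight_opcodes; a_opcode_lookup recovers
    // opcode 4 for that source by masking out those four bits and shifting right by one.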
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, 
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
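As a quick, hand-worked check of the beat accounting in TLEdge above (an illustrative sketch assuming beatBytes = 8; none of the names below come from the original sources):

// Software model of numBeats1 for a data-carrying message:
// UIntToOH1(lgSize, maxLgSize) >> log2Ceil(beatBytes)
def numBeats1Model(lgSize: Int, lgBeatBytes: Int): Int =
  ((1 << lgSize) - 1) >> lgBeatBytes

numBeats1Model(5, 3) // == 3: a 32-byte data burst occupies 4 beats of 8 bytes

firstlastHelper then loads its down-counter with this value on the first fire, so `first` holds on beat 0, `last` holds on the final beat, and `done` is simply `last && fire`.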
module TLMonitor_65( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [7:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [26:0] _GEN = {23'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [6:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [3:0] size; // @[Monitor.scala:389:22] reg [7:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] reg [6:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg [7:0] source_1; // @[Monitor.scala:541:22] reg [2:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [255:0] inflight; // @[Monitor.scala:614:27] reg [1023:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [2047:0] inflight_sizes; // @[Monitor.scala:618:33] reg [6:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 7'h0; // @[Edges.scala:229:27, :231:25] reg [6:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 7'h0; // @[Edges.scala:229:27, :231:25] wire [255:0] _GEN_0 = {248'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [255:0] _GEN_3 = {248'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [255:0] inflight_1; // @[Monitor.scala:726:35] reg [2047:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [6:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 7'h0; // @[Edges.scala:229:27, :231:25] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
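Relating back to the UInt helpers in package.scala at the top of this section, two hand-worked evaluations (illustrative values only, not taken from the sources above):

leftOR(0b00100.U)  // fills 1s upward from the set bit   -> 0b11100
rightOR(0b00100.U) // fills 1s downward from the set bit -> 0b00111
// addWrap: with x = 5.U, y = 4.U, n = 6 (not a power of 2), z = 5 + 4 = 9 >= 6, so the result wraps to 3.U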
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
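// For reference (an illustrative note, not part of the original source): GrayCounter above
// registers a plain binary count and outputs binary ^ (binary >> 1), so consecutive values of
// widx/ridx differ in exactly one bit:
//   binary: 000 001 010 011 100 101 110 111
//   gray:   000 001 011 010 110 111 101 100
// That single-bit-change property is what makes the index pointers safe to pass through the
// synchronizer shift registers on the far side of the crossing.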
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
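A minimal instantiation sketch for the crossing defined above (the producer/consumer names and the payload type are assumptions for illustration, not taken from the original sources):

val crossing = Module(new AsyncQueue(UInt(8.W), AsyncQueueParams(depth = 8, sync = 3)))
crossing.io.enq_clock := producerClock   // enqueue side lives in the producer domain
crossing.io.enq_reset := producerReset
crossing.io.deq_clock := consumerClock   // dequeue side lives in the consumer domain
crossing.io.deq_reset := consumerReset
crossing.io.enq <> producerIO            // Decoupled[UInt] driven by the producer
consumerIO <> crossing.io.deq            // Decoupled[UInt] consumed in the consumer domain

ToAsyncBundle and FromAsyncBundle wrap the same source/sink pair when the two halves are instantiated in separate modules.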
module AsyncValidSync_155( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_176 io_out_sink_valid ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
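For reference, a minimal sketch of how the behavioral helper at the top of ShiftReg.scala above is typically invoked (the signal names here are illustrative assumptions):

val synced = ShiftRegInit(rawAsyncBool, n = 3, init = false.B, name = Some("sync"))
// builds a 3-deep chain of RegNext stages (sync_2 -> sync_1 -> sync_0), each with reset value
// false.B, and returns the output of the final stage

The AsyncResetShiftReg module in the same file is the asynchronously reset variant, wrapped as a distinct module so backend flows can replace or constrain it when it is used for CDC synchronization.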
Generate the Verilog code corresponding to the following Chisel files. File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def 
validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File package.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Nodes.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
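// A minimal usage sketch (an assumption for illustration, not part of the original file):
// node types such as TLClientNode, TLManagerNode and TLAdapterNode are instantiated inside
// a LazyModule and composed with the diplomacy binding operator `:=`, for example
// (hypothetical instance names):
//
//   val cork = LazyModule(new TLCacheCork())
//   managerXbar.node := cork.node := clientTile.node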
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. 
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File CacheCork.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{IdRange, RegionType, TransferSizes} import freechips.rocketchip.tilelink.TLMessages.{ AcquireBlock, AcquirePerm, Get, PutFullData, PutPartialData, Release, ReleaseData, Grant, GrantData, AccessAck, AccessAckData, ReleaseAck } import freechips.rocketchip.util.IDPool import freechips.rocketchip.util.DataToAugmentedData case class TLCacheCorkParams( unsafe: Boolean = false, sinkIds: Int = 8) class TLCacheCork(params: TLCacheCorkParams = TLCacheCorkParams())(implicit p: Parameters) extends LazyModule { val unsafe = params.unsafe val sinkIds = params.sinkIds val node = TLAdapterNode( clientFn = { case cp => cp.v1copy(clients = cp.clients.map { c => c.v1copy( supportsProbe = TransferSizes.none, sourceId = IdRange(c.sourceId.start*2, c.sourceId.end*2))})}, managerFn = { case mp => mp.v1copy( endSinkId = if (mp.managers.exists(_.regionType == RegionType.UNCACHED)) sinkIds else 0, managers = mp.managers.map { m => m.v1copy( supportsAcquireB = if (m.regionType == RegionType.UNCACHED) m.supportsGet else m.supportsAcquireB, supportsAcquireT = if (m.regionType == RegionType.UNCACHED) m.supportsPutFull.intersect(m.supportsGet) else m.supportsAcquireT, alwaysGrantsT = if (m.regionType == RegionType.UNCACHED) m.supportsPutFull else m.alwaysGrantsT)})}) lazy val module = new Impl class Impl extends LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => // If this adapter does not need to do anything, toss all the above work and just directly connect if (!edgeIn.manager.anySupportAcquireB) { out <> in } else { val clients = edgeIn.client.clients val caches = clients.filter(_.supports.probe) require (clients.size == 1 || caches.size == 0 || unsafe, s"Only one client can safely use a TLCacheCork; ${clients.map(_.name)}") require (caches.size <= 1 || unsafe, s"Only one caching client allowed; ${clients.map(_.name)}") edgeOut.manager.managers.foreach { case m => require (!m.supportsAcquireB || unsafe, s"Cannot support caches beyond the Cork; ${m.name}") require (m.regionType <= RegionType.UNCACHED) } // The Cork turns [Acquire=>Get] => [AccessAckData=>GrantData] // and [ReleaseData=>PutFullData] => [AccessAck=>ReleaseAck] // We need to encode information sufficient to reverse the transformation in output. // A caveat is that we get Acquire+Release with the same source and must keep the // source unique after transformation onto the A channel. // The coding scheme is: // Release, AcquireBlock.BtoT, AcquirePerm => instant response // Put{Full,Partial}Data: 1, ReleaseData: 0 => AccessAck // {Arithmetic,Logical}Data,Get: 0, Acquire: 1 => AccessAckData // Hint:0 => HintAck // The CacheCork can potentially send the same source twice if a client sends // simultaneous Release and AMO/Get with the same source. It will still correctly // decode the messages based on the D.opcode, but the double use violates the spec. // Fortunately, no masters we know of behave this way! 
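      // A worked example of the coding scheme above (illustrative, not from the original file):
      // client source 5 issuing a Get leaves on A as source 10 (5 << 1 | 0), an Acquire leaves
      // as source 11 (5 << 1 | 1), and a ReleaseData as source 10. The response is decoded from
      // D.opcode together with source(0): AccessAckData with source(0)=1 becomes GrantData,
      // AccessAck with source(0)=0 becomes ReleaseAck, and the client's source is recovered as
      // source >> 1. A minimal sketch of the two directions (unused helpers, assumption only):
      def corkSource(src: UInt, lsb: Bool): UInt = Cat(src, lsb) // A-side: widen source by one bit
      def uncorkSource(src: UInt): UInt = src >> 1               // D-side: recover client source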
// Take requests from A to A or D (if BtoT Acquire) val a_a = Wire(chiselTypeOf(out.a)) val a_d = Wire(chiselTypeOf(in.d)) val isPut = in.a.bits.opcode === PutFullData || in.a.bits.opcode === PutPartialData val toD = (in.a.bits.opcode === AcquireBlock && in.a.bits.param === TLPermissions.BtoT) || (in.a.bits.opcode === AcquirePerm) in.a.ready := Mux(toD, a_d.ready, a_a.ready) a_a.valid := in.a.valid && !toD a_a.bits := in.a.bits a_a.bits.source := in.a.bits.source << 1 | Mux(isPut, 1.U, 0.U) // Transform Acquire into Get when (in.a.bits.opcode === AcquireBlock || in.a.bits.opcode === AcquirePerm) { a_a.bits.opcode := Get a_a.bits.param := 0.U a_a.bits.source := in.a.bits.source << 1 | 1.U } // Upgrades are instantly successful a_d.valid := in.a.valid && toD a_d.bits := edgeIn.Grant( fromSink = 0.U, toSource = in.a.bits.source, lgSize = in.a.bits.size, capPermissions = TLPermissions.toT) // Take ReleaseData from C to A; Release from C to D val c_a = Wire(chiselTypeOf(out.a)) c_a.valid := in.c.valid && in.c.bits.opcode === ReleaseData c_a.bits := edgeOut.Put( fromSource = in.c.bits.source << 1, toAddress = in.c.bits.address, lgSize = in.c.bits.size, data = in.c.bits.data, corrupt = in.c.bits.corrupt)._2 c_a.bits.user :<= in.c.bits.user // Releases without Data succeed instantly val c_d = Wire(chiselTypeOf(in.d)) c_d.valid := in.c.valid && in.c.bits.opcode === Release c_d.bits := edgeIn.ReleaseAck(in.c.bits) assert (!in.c.valid || in.c.bits.opcode === Release || in.c.bits.opcode === ReleaseData) in.c.ready := Mux(in.c.bits.opcode === Release, c_d.ready, c_a.ready) // Discard E in.e.ready := true.B // Block B; should never happen out.b.ready := false.B assert (!out.b.valid) // Track in-flight sinkIds val pool = Module(new IDPool(sinkIds)) pool.io.free.valid := in.e.fire pool.io.free.bits := in.e.bits.sink val in_d = Wire(chiselTypeOf(in.d)) val d_first = edgeOut.first(in_d) val d_grant = in_d.bits.opcode === GrantData || in_d.bits.opcode === Grant pool.io.alloc.ready := in.d.fire && d_first && d_grant in.d.valid := in_d.valid && (pool.io.alloc.valid || !d_first || !d_grant) in_d.ready := in.d.ready && (pool.io.alloc.valid || !d_first || !d_grant) in.d.bits := in_d.bits in.d.bits.sink := pool.io.alloc.bits holdUnless d_first // Take responses from D and transform them val d_d = Wire(chiselTypeOf(in.d)) d_d <> out.d d_d.bits.source := out.d.bits.source >> 1 // Record if a target was writable and auto-promote toT if it was // This is structured so that the vector can be constant prop'd away val wSourceVec = Reg(Vec(edgeIn.client.endSourceId, Bool())) val aWOk = edgeIn.manager.fastProperty(in.a.bits.address, !_.supportsPutFull.none, (b:Boolean) => b.B) val dWOk = wSourceVec(d_d.bits.source) val bypass = (edgeIn.manager.minLatency == 0).B && in.a.valid && in.a.bits.source === d_d.bits.source val dWHeld = Mux(bypass, aWOk, dWOk) holdUnless d_first when (in.a.fire) { wSourceVec(in.a.bits.source) := aWOk } // Wipe out any unused registers edgeIn.client.unusedSources.foreach { id => wSourceVec(id) := edgeIn.manager.anySupportPutFull.B } when (out.d.bits.opcode === AccessAckData && out.d.bits.source(0)) { d_d.bits.opcode := GrantData d_d.bits.param := Mux(dWHeld, TLPermissions.toT, TLPermissions.toB) } when (out.d.bits.opcode === AccessAck && !out.d.bits.source(0)) { d_d.bits.opcode := ReleaseAck } // Combine the sources of messages into the channels TLArbiter(TLArbiter.lowestIndexFirst)(out.a, (edgeOut.numBeats1(c_a.bits), c_a), (edgeOut.numBeats1(a_a.bits), a_a)) 
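      // Both this arbiter and the in_d arbiter below use lowestIndexFirst, which statically
      // favors the first source listed: ReleaseData writebacks (c_a) win over new A-channel
      // requests (a_a) on out.a, and real D responses (d_d) win over the locally generated
      // c_d/a_d acknowledgement queues on in_d.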
TLArbiter(TLArbiter.lowestIndexFirst)(in_d, (edgeIn .numBeats1(d_d.bits), d_d), (0.U, Queue(c_d, 2)), (0.U, Queue(a_d, 2))) // Tie off unused ports in.b.valid := false.B out.c.valid := false.B out.e.valid := false.B } } } } object TLCacheCork { def apply(params: TLCacheCorkParams)(implicit p: Parameters): TLNode = { val cork = LazyModule(new TLCacheCork(params)) cork.node } def apply(unsafe: Boolean = false, sinkIds: Int = 8)(implicit p: Parameters): TLNode = { apply(TLCacheCorkParams(unsafe, sinkIds)) } } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } } File Arbiter.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ object TLArbiter { // (valids, select) => readys type Policy = (Integer, UInt, Bool) => UInt val lowestIndexFirst: Policy = (width, valids, select) => ~(leftOR(valids) << 1)(width-1, 0) val highestIndexFirst: Policy = (width, valids, select) => ~((rightOR(valids) >> 1).pad(width)) val roundRobin: Policy = (width, valids, select) => if (width == 1) 1.U(1.W) else { val valid = valids(width-1, 0) assert (valid === valids) val mask = RegInit(((BigInt(1) << width)-1).U(width-1,0)) val filter = Cat(valid & ~mask, valid) val unready = (rightOR(filter, width*2, width) >> 1) | (mask << width) val readys = ~((unready >> width) & unready(width-1, 0)) when (select && valid.orR) { mask := leftOR(readys & valid, width) } readys(width-1, 0) } def lowestFromSeq[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: Seq[DecoupledIO[T]]): Unit = { apply(lowestIndexFirst)(sink, sources.map(s => (edge.numBeats1(s.bits), s)):_*) } def lowest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(lowestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def highest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(highestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def robin[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(roundRobin)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def apply[T <: Data](policy: Policy)(sink: DecoupledIO[T], sources: (UInt, DecoupledIO[T])*): Unit = { if (sources.isEmpty) { sink.bits := DontCare } else if (sources.size == 1) { sink :<>= sources.head._2 } else { val pairs = sources.toList val beatsIn = pairs.map(_._1) val sourcesIn = pairs.map(_._2) // The number of beats which remain to be sent val beatsLeft = RegInit(0.U) val idle = beatsLeft === 0.U val latch = idle && sink.ready // winner (if any) claims sink // Who wants access to the sink? val valids = sourcesIn.map(_.valid) // Arbitrate amongst the requests val readys = VecInit(policy(valids.size, Cat(valids.reverse), latch).asBools) // Which request wins arbitration? 
val winner = VecInit((readys zip valids) map { case (r,v) => r&&v }) // Confirm the policy works properly require (readys.size == valids.size) // Never two winners val prefixOR = winner.scanLeft(false.B)(_||_).init assert((prefixOR zip winner) map { case (p,w) => !p || !w } reduce {_ && _}) // If there was any request, there is a winner assert (!valids.reduce(_||_) || winner.reduce(_||_)) // Track remaining beats val maskedBeats = (winner zip beatsIn) map { case (w,b) => Mux(w, b, 0.U) } val initBeats = maskedBeats.reduce(_ | _) // no winner => 0 beats beatsLeft := Mux(latch, initBeats, beatsLeft - sink.fire) // The one-hot source granted access in the previous cycle val state = RegInit(VecInit(Seq.fill(sources.size)(false.B))) val muxState = Mux(idle, winner, state) state := muxState val allowed = Mux(idle, readys, state) (sourcesIn zip allowed) foreach { case (s, r) => s.ready := sink.ready && r } sink.valid := Mux(idle, valids.reduce(_||_), Mux1H(state, valids)) sink.bits :<= Mux1H(muxState, sourcesIn.map(_.bits)) } } } // Synthesizable unit tests import freechips.rocketchip.unittest._ abstract class DecoupledArbiterTest( policy: TLArbiter.Policy, txns: Int, timeout: Int, val numSources: Int, beatsLeftFromIdx: Int => UInt) (implicit p: Parameters) extends UnitTest(timeout) { val sources = Wire(Vec(numSources, DecoupledIO(UInt(log2Ceil(numSources).W)))) dontTouch(sources.suggestName("sources")) val sink = Wire(DecoupledIO(UInt(log2Ceil(numSources).W))) dontTouch(sink.suggestName("sink")) val count = RegInit(0.U(log2Ceil(txns).W)) val lfsr = LFSR(16, true.B) sources.zipWithIndex.map { case (z, i) => z.bits := i.U } TLArbiter(policy)(sink, sources.zipWithIndex.map { case (z, i) => (beatsLeftFromIdx(i), z) }:_*) count := count + 1.U io.finished := count >= txns.U } /** This tests that when a specific pattern of source valids are driven, * a new index from amongst that pattern is always selected, * unless one of those sources takes multiple beats, * in which case the same index should be selected until the arbiter goes idle. */ class TLDecoupledArbiterRobinTest(txns: Int = 128, timeout: Int = 500000, print: Boolean = false) (implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.roundRobin, txns, timeout, 6, i => i.U) { val lastWinner = RegInit((numSources+1).U) val beatsLeft = RegInit(0.U(log2Ceil(numSources).W)) val first = lastWinner > numSources.U val valid = lfsr(0) val ready = lfsr(15) sink.ready := ready sources.zipWithIndex.map { // pattern: every even-indexed valid is driven the same random way case (s, i) => s.valid := (if (i % 2 == 1) false.B else valid) } when (sink.fire) { if (print) { printf("TestRobin: %d\n", sink.bits) } when (beatsLeft === 0.U) { assert(lastWinner =/= sink.bits, "Round robin did not pick a new idx despite one being valid.") lastWinner := sink.bits beatsLeft := sink.bits } .otherwise { assert(lastWinner === sink.bits, "Round robin did not pick the same index over multiple beats") beatsLeft := beatsLeft - 1.U } } if (print) { when (!sink.fire) { printf("TestRobin: idle (%d %d)\n", valid, ready) } } } /** This tests that the lowest index is always selected across random single cycle transactions. 
*/ class TLDecoupledArbiterLowestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.lowestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertLowest(id: Int): Unit = { when (sources(id).valid) { assert((numSources-1 until id by -1).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a higher valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertLowest(_)) } } /** This tests that the highest index is always selected across random single cycle transactions. */ class TLDecoupledArbiterHighestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.highestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertHighest(id: Int): Unit = { when (sources(id).valid) { assert((0 until id).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a lower valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertHighest(_)) } }
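The TLEdgeIn constructors above (Probe, Grant, AccessAck, HintAck, and friends) are the helpers a manager-side implementation calls to build legal response beats. The following is a minimal sketch, not taken from the source, of a toy manager that answers every Get with edge.AccessAck; the SimpleROM name, the address range, the constant data value, and the surrounding Parameters instance are assumptions for illustration only.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.tilelink._

class SimpleROM(implicit p: Parameters) extends LazyModule {
  // Advertise a small read-only region that only supports Get.
  val node = TLManagerNode(Seq(TLSlavePortParameters.v1(
    managers = Seq(TLSlaveParameters.v1(
      address     = Seq(AddressSet(0x4000, 0xfff)),
      supportsGet = TransferSizes(1, 8))),
    beatBytes = 8)))

  lazy val module = new LazyModuleImp(this) {
    val (in, edge) = node.in(0)
    // Accept an A beat whenever the D channel can take the response.
    in.a.ready := in.d.ready
    in.d.valid := in.a.valid
    // AccessAck(a, data) copies source/size from the request, sets the
    // AccessAckData opcode, and clears denied/corrupt.
    in.d.bits  := edge.AccessAck(in.a.bits, "hdeadbeef".U(64.W))
    // No B/C/E traffic for an uncached manager.
    in.b.valid := false.B
    in.b.bits  := DontCare
    in.c.ready := true.B
    in.e.ready := true.B
  }
}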
module TLCacheCork( // @[CacheCork.scala:42:9] input clock, // @[CacheCork.scala:42:9] input reset, // @[CacheCork.scala:42:9] output auto_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_in_c_ready, // @[LazyModuleImp.scala:107:25] input auto_in_c_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_c_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_c_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_c_bits_size, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_c_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_c_bits_address, // @[LazyModuleImp.scala:107:25] input [63:0] auto_in_c_bits_data, // @[LazyModuleImp.scala:107:25] input auto_in_c_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_e_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_e_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [3:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25] ); wire [2:0] _in_d_bits_T_39; // @[Mux.scala:30:73] wire in_d_valid; // @[Arbiter.scala:96:24] wire a_a_ready; // @[Arbiter.scala:94:31] wire c_a_ready; // @[Arbiter.scala:94:31] 
wire nodeIn_d_valid; // @[CacheCork.scala:135:34] wire _q_1_io_enq_ready; // @[Decoupled.scala:362:21] wire _q_1_io_deq_valid; // @[Decoupled.scala:362:21] wire [2:0] _q_1_io_deq_bits_opcode; // @[Decoupled.scala:362:21] wire [1:0] _q_1_io_deq_bits_param; // @[Decoupled.scala:362:21] wire [2:0] _q_1_io_deq_bits_size; // @[Decoupled.scala:362:21] wire [2:0] _q_1_io_deq_bits_source; // @[Decoupled.scala:362:21] wire _q_1_io_deq_bits_denied; // @[Decoupled.scala:362:21] wire [63:0] _q_1_io_deq_bits_data; // @[Decoupled.scala:362:21] wire _q_1_io_deq_bits_corrupt; // @[Decoupled.scala:362:21] wire _q_io_enq_ready; // @[Decoupled.scala:362:21] wire _q_io_deq_valid; // @[Decoupled.scala:362:21] wire [2:0] _q_io_deq_bits_opcode; // @[Decoupled.scala:362:21] wire [1:0] _q_io_deq_bits_param; // @[Decoupled.scala:362:21] wire [2:0] _q_io_deq_bits_size; // @[Decoupled.scala:362:21] wire [2:0] _q_io_deq_bits_source; // @[Decoupled.scala:362:21] wire _q_io_deq_bits_denied; // @[Decoupled.scala:362:21] wire [63:0] _q_io_deq_bits_data; // @[Decoupled.scala:362:21] wire _q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21] wire _pool_io_alloc_valid; // @[CacheCork.scala:127:26] wire [2:0] _pool_io_alloc_bits; // @[CacheCork.scala:127:26] wire _toD_T = auto_in_a_bits_opcode == 3'h6; // @[CacheCork.scala:77:37] wire toD = _toD_T & auto_in_a_bits_param == 3'h2 | (&auto_in_a_bits_opcode); // @[CacheCork.scala:77:{37,54,73,97}, :78:37] wire nodeIn_a_ready = toD ? _q_1_io_enq_ready : a_a_ready; // @[Decoupled.scala:362:21] wire a_a_valid = auto_in_a_valid & ~toD; // @[CacheCork.scala:77:97, :81:{33,36}] wire _GEN = _toD_T | (&auto_in_a_bits_opcode); // @[CacheCork.scala:77:37, :78:37, :86:49] wire [2:0] a_a_bits_opcode = _GEN ? 3'h4 : auto_in_a_bits_opcode; // @[CacheCork.scala:82:18, :86:{49,86}, :87:27] wire winner_0 = auto_in_c_valid & (&auto_in_c_bits_opcode); // @[CacheCork.scala:102:{33,53}] wire c_a_bits_a_mask_sub_sub_sub_0_1 = auto_in_c_bits_size > 3'h2; // @[Misc.scala:206:21] wire c_a_bits_a_mask_sub_sub_size = auto_in_c_bits_size[1:0] == 2'h2; // @[OneHot.scala:64:49] wire c_a_bits_a_mask_sub_sub_0_1 = c_a_bits_a_mask_sub_sub_sub_0_1 | c_a_bits_a_mask_sub_sub_size & ~(auto_in_c_bits_address[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}] wire c_a_bits_a_mask_sub_sub_1_1 = c_a_bits_a_mask_sub_sub_sub_0_1 | c_a_bits_a_mask_sub_sub_size & auto_in_c_bits_address[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}] wire c_a_bits_a_mask_sub_size = auto_in_c_bits_size[1:0] == 2'h1; // @[OneHot.scala:64:49] wire c_a_bits_a_mask_sub_0_2 = ~(auto_in_c_bits_address[2]) & ~(auto_in_c_bits_address[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire c_a_bits_a_mask_sub_0_1 = c_a_bits_a_mask_sub_sub_0_1 | c_a_bits_a_mask_sub_size & c_a_bits_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire c_a_bits_a_mask_sub_1_2 = ~(auto_in_c_bits_address[2]) & auto_in_c_bits_address[1]; // @[Misc.scala:210:26, :211:20, :214:27] wire c_a_bits_a_mask_sub_1_1 = c_a_bits_a_mask_sub_sub_0_1 | c_a_bits_a_mask_sub_size & c_a_bits_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire c_a_bits_a_mask_sub_2_2 = auto_in_c_bits_address[2] & ~(auto_in_c_bits_address[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire c_a_bits_a_mask_sub_2_1 = c_a_bits_a_mask_sub_sub_1_1 | c_a_bits_a_mask_sub_size & c_a_bits_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire c_a_bits_a_mask_sub_3_2 = auto_in_c_bits_address[2] & auto_in_c_bits_address[1]; // @[Misc.scala:210:26, 
:214:27] wire c_a_bits_a_mask_sub_3_1 = c_a_bits_a_mask_sub_sub_1_1 | c_a_bits_a_mask_sub_size & c_a_bits_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire _nodeIn_c_ready_T = auto_in_c_bits_opcode == 3'h6; // @[CacheCork.scala:113:53] wire nodeIn_c_ready = _nodeIn_c_ready_T ? _q_io_enq_ready : c_a_ready; // @[Decoupled.scala:362:21] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire d_grant = _in_d_bits_T_39 == 3'h5 | _in_d_bits_T_39 == 3'h4; // @[Mux.scala:30:73] wire [3:0] _GEN_0 = {_pool_io_alloc_valid, d_first_counter}; // @[Edges.scala:229:27, :231:25] assign nodeIn_d_valid = in_d_valid & ((|_GEN_0) | ~d_grant); // @[Edges.scala:231:25] wire in_d_ready = auto_in_d_ready & ((|_GEN_0) | ~d_grant); // @[Edges.scala:231:25] reg [2:0] nodeIn_d_bits_sink_r; // @[package.scala:88:63] wire [2:0] nodeIn_d_bits_sink = d_first ? _pool_io_alloc_bits : nodeIn_d_bits_sink_r; // @[package.scala:88:{42,63}] wire _GEN_1 = auto_out_d_bits_opcode == 3'h1 & auto_out_d_bits_source[0]; // @[CacheCork.scala:162:{33,51,71}] wire [2:0] d_d_bits_opcode = auto_out_d_bits_opcode == 3'h0 & ~(auto_out_d_bits_source[0]) ? 3'h6 : _GEN_1 ? 3'h5 : auto_out_d_bits_opcode; // @[CacheCork.scala:142:13, :162:{51,71,76}, :163:27, :166:{33,47,50,73}, :167:27] reg [2:0] beatsLeft; // @[Arbiter.scala:60:30] wire idle = beatsLeft == 3'h0; // @[Arbiter.scala:60:30, :61:28] wire winner_1 = ~winner_0 & a_a_valid; // @[CacheCork.scala:81:33, :102:33] wire _nodeOut_a_valid_T = winner_0 | a_a_valid; // @[CacheCork.scala:81:33, :102:33] reg state_0; // @[Arbiter.scala:88:26] reg state_1; // @[Arbiter.scala:88:26] wire muxState_0 = idle ? winner_0 : state_0; // @[CacheCork.scala:102:33] wire muxState_1 = idle ? winner_1 : state_1; // @[Arbiter.scala:61:28, :71:69, :88:26, :89:25] assign c_a_ready = auto_out_a_ready & (idle | state_0); // @[Arbiter.scala:61:28, :88:26, :92:24, :94:31] assign a_a_ready = auto_out_a_ready & (idle ? ~winner_0 : state_1); // @[CacheCork.scala:102:33] wire nodeOut_a_valid = idle ? _nodeOut_a_valid_T : state_0 & winner_0 | state_1 & a_a_valid; // @[Mux.scala:30:73] reg [2:0] beatsLeft_1; // @[Arbiter.scala:60:30] wire idle_1 = beatsLeft_1 == 3'h0; // @[Arbiter.scala:60:30, :61:28] wire _GEN_2 = _q_io_deq_valid | auto_out_d_valid; // @[Decoupled.scala:362:21] wire winner_1_1 = ~auto_out_d_valid & _q_io_deq_valid; // @[Decoupled.scala:362:21] wire winner_1_2 = ~_GEN_2 & _q_1_io_deq_valid; // @[Decoupled.scala:362:21] wire _in_d_valid_T = auto_out_d_valid | _q_io_deq_valid; // @[Decoupled.scala:362:21]
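The beatsLeft, idle, winner, and state wires above carry @[Arbiter.scala:...] annotations because CacheCork merges its converted C-channel traffic and its A-channel traffic onto the single outgoing A channel with TLArbiter.apply. A minimal sketch of invoking that same helper directly, using a toy UInt payload instead of TileLink bundles (the TwoToOne module name and payload width are assumptions):

import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink.TLArbiter

class TwoToOne extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(Vec(2, Decoupled(UInt(8.W))))
    val out = Decoupled(UInt(8.W))
  })
  // Every source here is a single-beat message, so each contributes 0 extra beats;
  // lowestIndexFirst statically favours io.in(0) when both inputs are valid.
  TLArbiter(TLArbiter.lowestIndexFirst)(io.out, io.in.map(s => (0.U, s)): _*)
}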
Generate the Verilog code corresponding to the following Chisel files. File Buffer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.BufferParams class TLBufferNode ( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit valName: ValName) extends TLAdapterNode( clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) }, managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) } ) { override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}" override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none) } class TLBuffer( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters) extends LazyModule { def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace) def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde) def this()(implicit p: Parameters) = this(BufferParams.default) val node = new TLBufferNode(a, b, c, d, e) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def headBundle = node.out.head._2.bundle override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.a <> a(in .a) in .d <> d(out.d) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> b(out.b) out.c <> c(in .c) out.e <> e(in .e) } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLBuffer { def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default) def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde) def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace) def apply( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters): TLNode = { val buffer = LazyModule(new TLBuffer(a, b, c, d, e)) buffer.node } def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = { val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) } name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } } buffers.map(_.node) } def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = { chain(depth, name) .reduceLeftOption(_ :*=* _) .getOrElse(TLNameNode("no_buffer")) } } File Nodes.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. 
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } }
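TLBuffer and its chainNode helper from Buffer.scala are composed purely at the diplomacy level; the LazyModuleImp machinery above then instantiates each stage, which is how the queues in the Verilog that follows come into being. Below is a minimal sketch of that composition with hypothetical endpoint nodes; apart from TLBuffer, TLBuffer.chainNode, and BufferParams, every name and parameter value is an assumption for illustration.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams, IdRange, TransferSizes}
import freechips.rocketchip.tilelink._

class BufferedPath(implicit p: Parameters) extends LazyModule {
  val client = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "demo", sourceId = IdRange(0, 4))))))
  val ram = TLManagerNode(Seq(TLSlavePortParameters.v1(
    managers = Seq(TLSlaveParameters.v1(
      address         = Seq(AddressSet(0x0, 0xffff)),
      supportsGet     = TransferSizes(1, 8),
      supportsPutFull = TransferSizes(1, 8))),
    beatBytes = 8)))

  // One explicit stage with asymmetric parameters (flow-through A/C/E, default B/D),
  // followed by a two-deep chain of default buffers.
  ram := TLBuffer(BufferParams.flow, BufferParams.default) := TLBuffer.chainNode(2) := client

  lazy val module = new LazyModuleImp(this) {
    // Tie off the demo endpoints; the buffer queues (and a TLMonitor per edge)
    // are instantiated by instantiate() when this module elaborates.
    client.out.foreach { case (o, _) => o <> DontCare; o.a.valid := false.B }
    ram.in.foreach     { case (i, _) => i <> DontCare; i.d.valid := false.B }
  }
}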
module TLBuffer_a32d64s5k3z4u( // @[Buffer.scala:40:9] input clock, // @[Buffer.scala:40:9] input reset, // @[Buffer.scala:40:9] output auto_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [4:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [4:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [4:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [4:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25] ); wire _nodeIn_d_q_io_deq_valid; // @[Decoupled.scala:362:21] wire [2:0] _nodeIn_d_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21] wire [1:0] _nodeIn_d_q_io_deq_bits_param; // @[Decoupled.scala:362:21] wire [3:0] _nodeIn_d_q_io_deq_bits_size; // @[Decoupled.scala:362:21] wire [4:0] _nodeIn_d_q_io_deq_bits_source; // @[Decoupled.scala:362:21] wire [2:0] _nodeIn_d_q_io_deq_bits_sink; // @[Decoupled.scala:362:21] wire _nodeIn_d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21] wire _nodeIn_d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21] wire _nodeOut_a_q_io_enq_ready; // @[Decoupled.scala:362:21] TLMonitor_11 monitor ( // @[Nodes.scala:27:25] .clock (clock), .reset (reset), .io_in_a_ready (_nodeOut_a_q_io_enq_ready), // @[Decoupled.scala:362:21] .io_in_a_valid (auto_in_a_valid), 
.io_in_a_bits_opcode (auto_in_a_bits_opcode), .io_in_a_bits_param (auto_in_a_bits_param), .io_in_a_bits_size (auto_in_a_bits_size), .io_in_a_bits_source (auto_in_a_bits_source), .io_in_a_bits_address (auto_in_a_bits_address), .io_in_a_bits_mask (auto_in_a_bits_mask), .io_in_a_bits_corrupt (auto_in_a_bits_corrupt), .io_in_d_ready (auto_in_d_ready), .io_in_d_valid (_nodeIn_d_q_io_deq_valid), // @[Decoupled.scala:362:21] .io_in_d_bits_opcode (_nodeIn_d_q_io_deq_bits_opcode), // @[Decoupled.scala:362:21] .io_in_d_bits_param (_nodeIn_d_q_io_deq_bits_param), // @[Decoupled.scala:362:21] .io_in_d_bits_size (_nodeIn_d_q_io_deq_bits_size), // @[Decoupled.scala:362:21] .io_in_d_bits_source (_nodeIn_d_q_io_deq_bits_source), // @[Decoupled.scala:362:21] .io_in_d_bits_sink (_nodeIn_d_q_io_deq_bits_sink), // @[Decoupled.scala:362:21] .io_in_d_bits_denied (_nodeIn_d_q_io_deq_bits_denied), // @[Decoupled.scala:362:21] .io_in_d_bits_corrupt (_nodeIn_d_q_io_deq_bits_corrupt) // @[Decoupled.scala:362:21] ); // @[Nodes.scala:27:25] Queue2_TLBundleA_a32d64s5k3z4u nodeOut_a_q ( // @[Decoupled.scala:362:21] .clock (clock), .reset (reset), .io_enq_ready (_nodeOut_a_q_io_enq_ready), .io_enq_valid (auto_in_a_valid), .io_enq_bits_opcode (auto_in_a_bits_opcode), .io_enq_bits_param (auto_in_a_bits_param), .io_enq_bits_size (auto_in_a_bits_size), .io_enq_bits_source (auto_in_a_bits_source), .io_enq_bits_address (auto_in_a_bits_address), .io_enq_bits_mask (auto_in_a_bits_mask), .io_enq_bits_data (auto_in_a_bits_data), .io_enq_bits_corrupt (auto_in_a_bits_corrupt), .io_deq_ready (auto_out_a_ready), .io_deq_valid (auto_out_a_valid), .io_deq_bits_opcode (auto_out_a_bits_opcode), .io_deq_bits_param (auto_out_a_bits_param), .io_deq_bits_size (auto_out_a_bits_size), .io_deq_bits_source (auto_out_a_bits_source), .io_deq_bits_address (auto_out_a_bits_address), .io_deq_bits_mask (auto_out_a_bits_mask), .io_deq_bits_data (auto_out_a_bits_data), .io_deq_bits_corrupt (auto_out_a_bits_corrupt) ); // @[Decoupled.scala:362:21] Queue2_TLBundleD_a32d64s5k3z4u nodeIn_d_q ( // @[Decoupled.scala:362:21] .clock (clock), .reset (reset), .io_enq_ready (auto_out_d_ready), .io_enq_valid (auto_out_d_valid), .io_enq_bits_opcode (auto_out_d_bits_opcode), .io_enq_bits_param (auto_out_d_bits_param), .io_enq_bits_size (auto_out_d_bits_size), .io_enq_bits_source (auto_out_d_bits_source), .io_enq_bits_sink (auto_out_d_bits_sink), .io_enq_bits_denied (auto_out_d_bits_denied), .io_enq_bits_data (auto_out_d_bits_data), .io_enq_bits_corrupt (auto_out_d_bits_corrupt), .io_deq_ready (auto_in_d_ready), .io_deq_valid (_nodeIn_d_q_io_deq_valid), .io_deq_bits_opcode (_nodeIn_d_q_io_deq_bits_opcode), .io_deq_bits_param (_nodeIn_d_q_io_deq_bits_param), .io_deq_bits_size (_nodeIn_d_q_io_deq_bits_size), .io_deq_bits_source (_nodeIn_d_q_io_deq_bits_source), .io_deq_bits_sink (_nodeIn_d_q_io_deq_bits_sink), .io_deq_bits_denied (_nodeIn_d_q_io_deq_bits_denied), .io_deq_bits_data (auto_in_d_bits_data), .io_deq_bits_corrupt (_nodeIn_d_q_io_deq_bits_corrupt) ); // @[Decoupled.scala:362:21] assign auto_in_a_ready = _nodeOut_a_q_io_enq_ready; // @[Decoupled.scala:362:21] assign auto_in_d_valid = _nodeIn_d_q_io_deq_valid; // @[Decoupled.scala:362:21] assign auto_in_d_bits_opcode = _nodeIn_d_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21] assign auto_in_d_bits_param = _nodeIn_d_q_io_deq_bits_param; // @[Decoupled.scala:362:21] assign auto_in_d_bits_size = _nodeIn_d_q_io_deq_bits_size; // @[Decoupled.scala:362:21] assign auto_in_d_bits_source = 
_nodeIn_d_q_io_deq_bits_source; // @[Decoupled.scala:362:21] assign auto_in_d_bits_sink = _nodeIn_d_q_io_deq_bits_sink; // @[Decoupled.scala:362:21] assign auto_in_d_bits_denied = _nodeIn_d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21] assign auto_in_d_bits_corrupt = _nodeIn_d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21] endmodule
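The two Queue2_TLBundle* instances in the module above are what BufferParams.default, a two-entry queue with flow and pipe both false, elaborates to for the A and D channels. A minimal stand-alone sketch of the same buffering, using a plain UInt payload instead of a TileLink bundle (the TwoDeepBuffer name and 64-bit width are assumptions):

import chisel3._
import chisel3.util._

class TwoDeepBuffer extends Module {
  val io = IO(new Bundle {
    val enq = Flipped(Decoupled(UInt(64.W)))
    val deq = Decoupled(UInt(64.W))
  })
  // BufferParams.default applies exactly this: a 2-deep Queue with
  // flow = false and pipe = false on each directed channel.
  io.deq <> Queue(io.enq, entries = 2)
}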
Generate the Verilog code corresponding to the following Chisel files. File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File RocketCore.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import chisel3.withClock import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.tile._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property import scala.collection.mutable.ArrayBuffer case class RocketCoreParams( xLen: Int = 64, pgLevels: Int = 3, // sv39 default bootFreqHz: BigInt = 0, useVM: Boolean = true, useUser: Boolean = false, useSupervisor: Boolean = false, useHypervisor: Boolean = false, useDebug: Boolean = true, useAtomics: Boolean = true, useAtomicsOnlyForIO: Boolean = false, useCompressed: Boolean = true, useRVE: Boolean = false, useConditionalZero: Boolean = false, useZba: Boolean = false, useZbb: Boolean = false, useZbs: Boolean = false, nLocalInterrupts: Int = 0, useNMI: Boolean = false, nBreakpoints: Int = 1, useBPWatch: Boolean = false, mcontextWidth: Int = 0, scontextWidth: Int = 0, nPMPs: Int = 8, nPerfCounters: Int = 0, haveBasicCounters: Boolean = true, haveCFlush: Boolean = false, misaWritable: Boolean = true, nL2TLBEntries: Int = 0, nL2TLBWays: Int = 1, nPTECacheEntries: Int = 8, mtvecInit: Option[BigInt] = Some(BigInt(0)), mtvecWritable: Boolean = true, fastLoadWord: Boolean = true, fastLoadByte: Boolean = false, branchPredictionModeCSR: Boolean = false, clockGate: Boolean = false, mvendorid: Int = 0, // 0 means non-commercial implementation mimpid: Int = 0x20181004, // release date in BCD mulDiv: Option[MulDivParams] = Some(MulDivParams()), fpu: Option[FPUParams] = Some(FPUParams()), debugROB: Option[DebugROBParams] = None, // if size < 1, SW ROB, else HW ROB haveCease: Boolean = true, // non-standard CEASE instruction haveSimTimeout: Boolean = true, // add plusarg for simulation timeout vector: Option[RocketCoreVectorParams] = None ) extends CoreParams { val lgPauseCycles = 
5 val haveFSDirty = false val pmpGranularity: Int = if (useHypervisor) 4096 else 4 val fetchWidth: Int = if (useCompressed) 2 else 1 // fetchWidth doubled, but coreInstBytes halved, for RVC: val decodeWidth: Int = fetchWidth / (if (useCompressed) 2 else 1) val retireWidth: Int = 1 val instBits: Int = if (useCompressed) 16 else 32 val lrscCycles: Int = 80 // worst case is 14 mispredicted branches + slop val traceHasWdata: Boolean = debugROB.isDefined // ooo wb, so no wdata in trace override val useVector = vector.isDefined override val vectorUseDCache = vector.map(_.useDCache).getOrElse(false) override def vLen = vector.map(_.vLen).getOrElse(0) override def eLen = vector.map(_.eLen).getOrElse(0) override def vfLen = vector.map(_.vfLen).getOrElse(0) override def vfh = vector.map(_.vfh).getOrElse(false) override def vExts = vector.map(_.vExts).getOrElse(Nil) override def vMemDataBits = vector.map(_.vMemDataBits).getOrElse(0) override val customIsaExt = Option.when(haveCease)("xrocket") // CEASE instruction override def minFLen: Int = fpu.map(_.minFLen).getOrElse(32) override def customCSRs(implicit p: Parameters) = new RocketCustomCSRs } trait HasRocketCoreParameters extends HasCoreParameters { lazy val rocketParams: RocketCoreParams = tileParams.core.asInstanceOf[RocketCoreParams] val fastLoadWord = rocketParams.fastLoadWord val fastLoadByte = rocketParams.fastLoadByte val mulDivParams = rocketParams.mulDiv.getOrElse(MulDivParams()) // TODO ask andrew about this require(!fastLoadByte || fastLoadWord) require(!rocketParams.haveFSDirty, "rocket doesn't support setting fs dirty from outside, please disable haveFSDirty") } class RocketCustomCSRs(implicit p: Parameters) extends CustomCSRs with HasRocketCoreParameters { override def bpmCSR = { rocketParams.branchPredictionModeCSR.option(CustomCSR(bpmCSRId, BigInt(1), Some(BigInt(0)))) } private def haveDCache = tileParams.dcache.get.scratch.isEmpty override def chickenCSR = { val mask = BigInt( tileParams.dcache.get.clockGate.toInt << 0 | rocketParams.clockGate.toInt << 1 | rocketParams.clockGate.toInt << 2 | 1 << 3 | // disableSpeculativeICacheRefill haveDCache.toInt << 9 | // suppressCorruptOnGrantData tileParams.icache.get.prefetch.toInt << 17 ) Some(CustomCSR(chickenCSRId, mask, Some(mask))) } def disableICachePrefetch = getOrElse(chickenCSR, _.value(17), true.B) def marchid = CustomCSR.constant(CSRs.marchid, BigInt(1)) def mvendorid = CustomCSR.constant(CSRs.mvendorid, BigInt(rocketParams.mvendorid)) // mimpid encodes a release version in the form of a BCD-encoded datestamp. 
def mimpid = CustomCSR.constant(CSRs.mimpid, BigInt(rocketParams.mimpid)) override def decls = super.decls :+ marchid :+ mvendorid :+ mimpid } class CoreInterrupts(val hasBeu: Boolean)(implicit p: Parameters) extends TileInterrupts()(p) { val buserror = Option.when(hasBeu)(Bool()) } trait HasRocketCoreIO extends HasRocketCoreParameters { implicit val p: Parameters def nTotalRoCCCSRs: Int val io = IO(new CoreBundle()(p) { val hartid = Input(UInt(hartIdLen.W)) val reset_vector = Input(UInt(resetVectorLen.W)) val interrupts = Input(new CoreInterrupts(tileParams.asInstanceOf[RocketTileParams].beuAddr.isDefined)) val imem = new FrontendIO val dmem = new HellaCacheIO val ptw = Flipped(new DatapathPTWIO()) val fpu = Flipped(new FPUCoreIO()) val rocc = Flipped(new RoCCCoreIO(nTotalRoCCCSRs)) val trace = Output(new TraceBundle) val bpwatch = Output(Vec(coreParams.nBreakpoints, new BPWatch(coreParams.retireWidth))) val cease = Output(Bool()) val wfi = Output(Bool()) val traceStall = Input(Bool()) val vector = if (usingVector) Some(Flipped(new VectorCoreIO)) else None }) } class Rocket(tile: RocketTile)(implicit p: Parameters) extends CoreModule()(p) with HasRocketCoreParameters with HasRocketCoreIO { def nTotalRoCCCSRs = tile.roccCSRs.flatten.size import ALU._ val clock_en_reg = RegInit(true.B) val long_latency_stall = Reg(Bool()) val id_reg_pause = Reg(Bool()) val imem_might_request_reg = Reg(Bool()) val clock_en = WireDefault(true.B) val gated_clock = if (!rocketParams.clockGate) clock else ClockGate(clock, clock_en, "rocket_clock_gate") class RocketImpl { // entering gated-clock domain // performance counters def pipelineIDToWB[T <: Data](x: T): T = RegEnable(RegEnable(RegEnable(x, !ctrl_killd), ex_pc_valid), mem_pc_valid) val perfEvents = new EventSets(Seq( new EventSet((mask, hits) => Mux(wb_xcpt, mask(0), wb_valid && pipelineIDToWB((mask & hits).orR)), Seq( ("exception", () => false.B), ("load", () => id_ctrl.mem && id_ctrl.mem_cmd === M_XRD && !id_ctrl.fp), ("store", () => id_ctrl.mem && id_ctrl.mem_cmd === M_XWR && !id_ctrl.fp), ("amo", () => usingAtomics.B && id_ctrl.mem && (isAMO(id_ctrl.mem_cmd) || id_ctrl.mem_cmd.isOneOf(M_XLR, M_XSC))), ("system", () => id_ctrl.csr =/= CSR.N), ("arith", () => id_ctrl.wxd && !(id_ctrl.jal || id_ctrl.jalr || id_ctrl.mem || id_ctrl.fp || id_ctrl.mul || id_ctrl.div || id_ctrl.csr =/= CSR.N)), ("branch", () => id_ctrl.branch), ("jal", () => id_ctrl.jal), ("jalr", () => id_ctrl.jalr)) ++ (if (!usingMulDiv) Seq() else Seq( ("mul", () => if (pipelinedMul) id_ctrl.mul else id_ctrl.div && (id_ctrl.alu_fn & FN_DIV) =/= FN_DIV), ("div", () => if (pipelinedMul) id_ctrl.div else id_ctrl.div && (id_ctrl.alu_fn & FN_DIV) === FN_DIV))) ++ (if (!usingFPU) Seq() else Seq( ("fp load", () => id_ctrl.fp && io.fpu.dec.ldst && io.fpu.dec.wen), ("fp store", () => id_ctrl.fp && io.fpu.dec.ldst && !io.fpu.dec.wen), ("fp add", () => id_ctrl.fp && io.fpu.dec.fma && io.fpu.dec.swap23), ("fp mul", () => id_ctrl.fp && io.fpu.dec.fma && !io.fpu.dec.swap23 && !io.fpu.dec.ren3), ("fp mul-add", () => id_ctrl.fp && io.fpu.dec.fma && io.fpu.dec.ren3), ("fp div/sqrt", () => id_ctrl.fp && (io.fpu.dec.div || io.fpu.dec.sqrt)), ("fp other", () => id_ctrl.fp && !(io.fpu.dec.ldst || io.fpu.dec.fma || io.fpu.dec.div || io.fpu.dec.sqrt))))), new EventSet((mask, hits) => (mask & hits).orR, Seq( ("load-use interlock", () => id_ex_hazard && ex_ctrl.mem || id_mem_hazard && mem_ctrl.mem || id_wb_hazard && wb_ctrl.mem), ("long-latency interlock", () => id_sboard_hazard), ("csr interlock", () => 
id_ex_hazard && ex_ctrl.csr =/= CSR.N || id_mem_hazard && mem_ctrl.csr =/= CSR.N || id_wb_hazard && wb_ctrl.csr =/= CSR.N), ("I$ blocked", () => icache_blocked), ("D$ blocked", () => id_ctrl.mem && dcache_blocked), ("branch misprediction", () => take_pc_mem && mem_direction_misprediction), ("control-flow target misprediction", () => take_pc_mem && mem_misprediction && mem_cfi && !mem_direction_misprediction && !icache_blocked), ("flush", () => wb_reg_flush_pipe), ("replay", () => replay_wb)) ++ (if (!usingMulDiv) Seq() else Seq( ("mul/div interlock", () => id_ex_hazard && (ex_ctrl.mul || ex_ctrl.div) || id_mem_hazard && (mem_ctrl.mul || mem_ctrl.div) || id_wb_hazard && wb_ctrl.div))) ++ (if (!usingFPU) Seq() else Seq( ("fp interlock", () => id_ex_hazard && ex_ctrl.fp || id_mem_hazard && mem_ctrl.fp || id_wb_hazard && wb_ctrl.fp || id_ctrl.fp && id_stall_fpu)))), new EventSet((mask, hits) => (mask & hits).orR, Seq( ("I$ miss", () => io.imem.perf.acquire), ("D$ miss", () => io.dmem.perf.acquire), ("D$ release", () => io.dmem.perf.release), ("ITLB miss", () => io.imem.perf.tlbMiss), ("DTLB miss", () => io.dmem.perf.tlbMiss), ("L2 TLB miss", () => io.ptw.perf.l2miss))))) val pipelinedMul = usingMulDiv && mulDivParams.mulUnroll == xLen val decode_table = { (if (usingMulDiv) new MDecode(pipelinedMul) +: (xLen > 32).option(new M64Decode(pipelinedMul)).toSeq else Nil) ++: (if (usingAtomics) new ADecode +: (xLen > 32).option(new A64Decode).toSeq else Nil) ++: (if (fLen >= 32) new FDecode +: (xLen > 32).option(new F64Decode).toSeq else Nil) ++: (if (fLen >= 64) new DDecode +: (xLen > 32).option(new D64Decode).toSeq else Nil) ++: (if (minFLen == 16) new HDecode +: (xLen > 32).option(new H64Decode).toSeq ++: (fLen >= 64).option(new HDDecode).toSeq else Nil) ++: (usingRoCC.option(new RoCCDecode)) ++: (if (xLen == 32) new I32Decode else new I64Decode) +: (usingVM.option(new SVMDecode)) ++: (usingSupervisor.option(new SDecode)) ++: (usingHypervisor.option(new HypervisorDecode)) ++: ((usingHypervisor && (xLen == 64)).option(new Hypervisor64Decode)) ++: (usingDebug.option(new DebugDecode)) ++: (usingNMI.option(new NMIDecode)) ++: (usingConditionalZero.option(new ConditionalZeroDecode)) ++: Seq(new FenceIDecode(tile.dcache.flushOnFenceI)) ++: coreParams.haveCFlush.option(new CFlushDecode(tile.dcache.canSupportCFlushLine)) ++: rocketParams.haveCease.option(new CeaseDecode) ++: usingVector.option(new VCFGDecode) ++: (if (coreParams.useZba) new ZbaDecode +: (xLen > 32).option(new Zba64Decode).toSeq else Nil) ++: (if (coreParams.useZbb) Seq(new ZbbDecode, if (xLen == 32) new Zbb32Decode else new Zbb64Decode) else Nil) ++: coreParams.useZbs.option(new ZbsDecode) ++: Seq(new IDecode) } flatMap(_.table) val ex_ctrl = Reg(new IntCtrlSigs) val mem_ctrl = Reg(new IntCtrlSigs) val wb_ctrl = Reg(new IntCtrlSigs) val ex_reg_xcpt_interrupt = Reg(Bool()) val ex_reg_valid = Reg(Bool()) val ex_reg_rvc = Reg(Bool()) val ex_reg_btb_resp = Reg(new BTBResp) val ex_reg_xcpt = Reg(Bool()) val ex_reg_flush_pipe = Reg(Bool()) val ex_reg_load_use = Reg(Bool()) val ex_reg_cause = Reg(UInt()) val ex_reg_replay = Reg(Bool()) val ex_reg_pc = Reg(UInt()) val ex_reg_mem_size = Reg(UInt()) val ex_reg_hls = Reg(Bool()) val ex_reg_inst = Reg(Bits()) val ex_reg_raw_inst = Reg(UInt()) val ex_reg_wphit = Reg(Vec(nBreakpoints, Bool())) val ex_reg_set_vconfig = Reg(Bool()) val mem_reg_xcpt_interrupt = Reg(Bool()) val mem_reg_valid = Reg(Bool()) val mem_reg_rvc = Reg(Bool()) val mem_reg_btb_resp = Reg(new BTBResp) val mem_reg_xcpt = Reg(Bool()) 
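// The ex_reg_*, mem_reg_* and wb_reg_* registers declared above and below are the pipeline
// latches that carry per-instruction state into the execute, memory and writeback stages.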
val mem_reg_replay = Reg(Bool()) val mem_reg_flush_pipe = Reg(Bool()) val mem_reg_cause = Reg(UInt()) val mem_reg_slow_bypass = Reg(Bool()) val mem_reg_load = Reg(Bool()) val mem_reg_store = Reg(Bool()) val mem_reg_set_vconfig = Reg(Bool()) val mem_reg_sfence = Reg(Bool()) val mem_reg_pc = Reg(UInt()) val mem_reg_inst = Reg(Bits()) val mem_reg_mem_size = Reg(UInt()) val mem_reg_hls_or_dv = Reg(Bool()) val mem_reg_raw_inst = Reg(UInt()) val mem_reg_wdata = Reg(Bits()) val mem_reg_rs2 = Reg(Bits()) val mem_br_taken = Reg(Bool()) val take_pc_mem = Wire(Bool()) val mem_reg_wphit = Reg(Vec(nBreakpoints, Bool())) val wb_reg_valid = Reg(Bool()) val wb_reg_xcpt = Reg(Bool()) val wb_reg_replay = Reg(Bool()) val wb_reg_flush_pipe = Reg(Bool()) val wb_reg_cause = Reg(UInt()) val wb_reg_set_vconfig = Reg(Bool()) val wb_reg_sfence = Reg(Bool()) val wb_reg_pc = Reg(UInt()) val wb_reg_mem_size = Reg(UInt()) val wb_reg_hls_or_dv = Reg(Bool()) val wb_reg_hfence_v = Reg(Bool()) val wb_reg_hfence_g = Reg(Bool()) val wb_reg_inst = Reg(Bits()) val wb_reg_raw_inst = Reg(UInt()) val wb_reg_wdata = Reg(Bits()) val wb_reg_rs2 = Reg(Bits()) val take_pc_wb = Wire(Bool()) val wb_reg_wphit = Reg(Vec(nBreakpoints, Bool())) val take_pc_mem_wb = take_pc_wb || take_pc_mem val take_pc = take_pc_mem_wb // decode stage val ibuf = Module(new IBuf) val id_expanded_inst = ibuf.io.inst.map(_.bits.inst) val id_raw_inst = ibuf.io.inst.map(_.bits.raw) val id_inst = id_expanded_inst.map(_.bits) ibuf.io.imem <> io.imem.resp ibuf.io.kill := take_pc require(decodeWidth == 1 /* TODO */ && retireWidth == decodeWidth) require(!(coreParams.useRVE && coreParams.fpu.nonEmpty), "Can't select both RVE and floating-point") require(!(coreParams.useRVE && coreParams.useHypervisor), "Can't select both RVE and Hypervisor") val id_ctrl = Wire(new IntCtrlSigs).decode(id_inst(0), decode_table) val lgNXRegs = if (coreParams.useRVE) 4 else 5 val regAddrMask = (1 << lgNXRegs) - 1 def decodeReg(x: UInt) = (x.extract(x.getWidth-1, lgNXRegs).asBool, x(lgNXRegs-1, 0)) val (id_raddr3_illegal, id_raddr3) = decodeReg(id_expanded_inst(0).rs3) val (id_raddr2_illegal, id_raddr2) = decodeReg(id_expanded_inst(0).rs2) val (id_raddr1_illegal, id_raddr1) = decodeReg(id_expanded_inst(0).rs1) val (id_waddr_illegal, id_waddr) = decodeReg(id_expanded_inst(0).rd) val id_load_use = Wire(Bool()) val id_reg_fence = RegInit(false.B) val id_ren = IndexedSeq(id_ctrl.rxs1, id_ctrl.rxs2) val id_raddr = IndexedSeq(id_raddr1, id_raddr2) val rf = new RegFile(regAddrMask, xLen) val id_rs = id_raddr.map(rf.read _) val ctrl_killd = Wire(Bool()) val id_npc = (ibuf.io.pc.asSInt + ImmGen(IMM_UJ, id_inst(0))).asUInt val csr = Module(new CSRFile(perfEvents, coreParams.customCSRs.decls, tile.roccCSRs.flatten, tile.rocketParams.beuAddr.isDefined)) val id_csr_en = id_ctrl.csr.isOneOf(CSR.S, CSR.C, CSR.W) val id_system_insn = id_ctrl.csr === CSR.I val id_csr_ren = id_ctrl.csr.isOneOf(CSR.S, CSR.C) && id_expanded_inst(0).rs1 === 0.U val id_csr = Mux(id_system_insn && id_ctrl.mem, CSR.N, Mux(id_csr_ren, CSR.R, id_ctrl.csr)) val id_csr_flush = id_system_insn || (id_csr_en && !id_csr_ren && csr.io.decode(0).write_flush) val id_set_vconfig = Seq(Instructions.VSETVLI, Instructions.VSETIVLI, Instructions.VSETVL).map(_ === id_inst(0)).orR && usingVector.B id_ctrl.vec := false.B if (usingVector) { val v_decode = rocketParams.vector.get.decoder(p) v_decode.io.inst := id_inst(0) v_decode.io.vconfig := csr.io.vector.get.vconfig when (v_decode.io.legal) { id_ctrl.legal := !csr.io.vector.get.vconfig.vtype.vill 
id_ctrl.fp := v_decode.io.fp id_ctrl.rocc := false.B id_ctrl.branch := false.B id_ctrl.jal := false.B id_ctrl.jalr := false.B id_ctrl.rxs2 := v_decode.io.read_rs2 id_ctrl.rxs1 := v_decode.io.read_rs1 id_ctrl.mem := false.B id_ctrl.rfs1 := v_decode.io.read_frs1 id_ctrl.rfs2 := false.B id_ctrl.rfs3 := false.B id_ctrl.wfd := v_decode.io.write_frd id_ctrl.mul := false.B id_ctrl.div := false.B id_ctrl.wxd := v_decode.io.write_rd id_ctrl.csr := CSR.N id_ctrl.fence_i := false.B id_ctrl.fence := false.B id_ctrl.amo := false.B id_ctrl.dp := false.B id_ctrl.vec := true.B } } val id_illegal_insn = !id_ctrl.legal || (id_ctrl.mul || id_ctrl.div) && !csr.io.status.isa('m'-'a') || id_ctrl.amo && !csr.io.status.isa('a'-'a') || id_ctrl.fp && (csr.io.decode(0).fp_illegal || (io.fpu.illegal_rm && !id_ctrl.vec)) || (id_ctrl.vec) && (csr.io.decode(0).vector_illegal || csr.io.vector.map(_.vconfig.vtype.vill).getOrElse(false.B)) || id_ctrl.dp && !csr.io.status.isa('d'-'a') || ibuf.io.inst(0).bits.rvc && !csr.io.status.isa('c'-'a') || id_raddr2_illegal && id_ctrl.rxs2 || id_raddr1_illegal && id_ctrl.rxs1 || id_waddr_illegal && id_ctrl.wxd || id_ctrl.rocc && csr.io.decode(0).rocc_illegal || id_csr_en && (csr.io.decode(0).read_illegal || !id_csr_ren && csr.io.decode(0).write_illegal) || !ibuf.io.inst(0).bits.rvc && (id_system_insn && csr.io.decode(0).system_illegal) val id_virtual_insn = id_ctrl.legal && ((id_csr_en && !(!id_csr_ren && csr.io.decode(0).write_illegal) && csr.io.decode(0).virtual_access_illegal) || (!ibuf.io.inst(0).bits.rvc && id_system_insn && csr.io.decode(0).virtual_system_illegal)) // stall decode for fences (now, for AMO.rl; later, for AMO.aq and FENCE) val id_amo_aq = id_inst(0)(26) val id_amo_rl = id_inst(0)(25) val id_fence_pred = id_inst(0)(27,24) val id_fence_succ = id_inst(0)(23,20) val id_fence_next = id_ctrl.fence || id_ctrl.amo && id_amo_aq val id_mem_busy = !io.dmem.ordered || io.dmem.req.valid when (!id_mem_busy) { id_reg_fence := false.B } val id_rocc_busy = usingRoCC.B && (io.rocc.busy || ex_reg_valid && ex_ctrl.rocc || mem_reg_valid && mem_ctrl.rocc || wb_reg_valid && wb_ctrl.rocc) val id_csr_rocc_write = tile.roccCSRs.flatten.map(_.id.U === id_inst(0)(31,20)).orR && id_csr_en && !id_csr_ren val id_vec_busy = io.vector.map(v => v.backend_busy || v.trap_check_busy).getOrElse(false.B) val id_do_fence = WireDefault(id_rocc_busy && (id_ctrl.fence || id_csr_rocc_write) || id_vec_busy && id_ctrl.fence || id_mem_busy && (id_ctrl.amo && id_amo_rl || id_ctrl.fence_i || id_reg_fence && (id_ctrl.mem || id_ctrl.rocc))) val bpu = Module(new BreakpointUnit(nBreakpoints)) bpu.io.status := csr.io.status bpu.io.bp := csr.io.bp bpu.io.pc := ibuf.io.pc bpu.io.ea := mem_reg_wdata bpu.io.mcontext := csr.io.mcontext bpu.io.scontext := csr.io.scontext val id_xcpt0 = ibuf.io.inst(0).bits.xcpt0 val id_xcpt1 = ibuf.io.inst(0).bits.xcpt1 val (id_xcpt, id_cause) = checkExceptions(List( (csr.io.interrupt, csr.io.interrupt_cause), (bpu.io.debug_if, CSR.debugTriggerCause.U), (bpu.io.xcpt_if, Causes.breakpoint.U), (id_xcpt0.pf.inst, Causes.fetch_page_fault.U), (id_xcpt0.gf.inst, Causes.fetch_guest_page_fault.U), (id_xcpt0.ae.inst, Causes.fetch_access.U), (id_xcpt1.pf.inst, Causes.fetch_page_fault.U), (id_xcpt1.gf.inst, Causes.fetch_guest_page_fault.U), (id_xcpt1.ae.inst, Causes.fetch_access.U), (id_virtual_insn, Causes.virtual_instruction.U), (id_illegal_insn, Causes.illegal_instruction.U))) val idCoverCauses = List( (CSR.debugTriggerCause, "DEBUG_TRIGGER"), (Causes.breakpoint, "BREAKPOINT"), 
(Causes.fetch_access, "FETCH_ACCESS"), (Causes.illegal_instruction, "ILLEGAL_INSTRUCTION") ) ++ (if (usingVM) List( (Causes.fetch_page_fault, "FETCH_PAGE_FAULT") ) else Nil) coverExceptions(id_xcpt, id_cause, "DECODE", idCoverCauses) val dcache_bypass_data = if (fastLoadByte) io.dmem.resp.bits.data(xLen-1, 0) else if (fastLoadWord) io.dmem.resp.bits.data_word_bypass(xLen-1, 0) else wb_reg_wdata // detect bypass opportunities val ex_waddr = ex_reg_inst(11,7) & regAddrMask.U val mem_waddr = mem_reg_inst(11,7) & regAddrMask.U val wb_waddr = wb_reg_inst(11,7) & regAddrMask.U val bypass_sources = IndexedSeq( (true.B, 0.U, 0.U), // treat reading x0 as a bypass (ex_reg_valid && ex_ctrl.wxd, ex_waddr, mem_reg_wdata), (mem_reg_valid && mem_ctrl.wxd && !mem_ctrl.mem, mem_waddr, wb_reg_wdata), (mem_reg_valid && mem_ctrl.wxd, mem_waddr, dcache_bypass_data)) val id_bypass_src = id_raddr.map(raddr => bypass_sources.map(s => s._1 && s._2 === raddr)) // execute stage val bypass_mux = bypass_sources.map(_._3) val ex_reg_rs_bypass = Reg(Vec(id_raddr.size, Bool())) val ex_reg_rs_lsb = Reg(Vec(id_raddr.size, UInt(log2Ceil(bypass_sources.size).W))) val ex_reg_rs_msb = Reg(Vec(id_raddr.size, UInt())) val ex_rs = for (i <- 0 until id_raddr.size) yield Mux(ex_reg_rs_bypass(i), bypass_mux(ex_reg_rs_lsb(i)), Cat(ex_reg_rs_msb(i), ex_reg_rs_lsb(i))) val ex_imm = ImmGen(ex_ctrl.sel_imm, ex_reg_inst) val ex_rs1shl = Mux(ex_reg_inst(3), ex_rs(0)(31,0), ex_rs(0)) << ex_reg_inst(14,13) val ex_op1 = MuxLookup(ex_ctrl.sel_alu1, 0.S)(Seq( A1_RS1 -> ex_rs(0).asSInt, A1_PC -> ex_reg_pc.asSInt, A1_RS1SHL -> (if (rocketParams.useZba) ex_rs1shl.asSInt else 0.S) )) val ex_op2_oh = UIntToOH(Mux(ex_ctrl.sel_alu2(0), (ex_reg_inst >> 20).asUInt, ex_rs(1))(log2Ceil(xLen)-1,0)).asSInt val ex_op2 = MuxLookup(ex_ctrl.sel_alu2, 0.S)(Seq( A2_RS2 -> ex_rs(1).asSInt, A2_IMM -> ex_imm, A2_SIZE -> Mux(ex_reg_rvc, 2.S, 4.S), ) ++ (if (coreParams.useZbs) Seq( A2_RS2OH -> ex_op2_oh, A2_IMMOH -> ex_op2_oh, ) else Nil)) val (ex_new_vl, ex_new_vconfig) = if (usingVector) { val ex_new_vtype = VType.fromUInt(MuxCase(ex_rs(1), Seq( ex_reg_inst(31,30).andR -> ex_reg_inst(29,20), !ex_reg_inst(31) -> ex_reg_inst(30,20)))) val ex_avl = Mux(ex_ctrl.rxs1, Mux(ex_reg_inst(19,15) === 0.U, Mux(ex_reg_inst(11,7) === 0.U, csr.io.vector.get.vconfig.vl, ex_new_vtype.vlMax), ex_rs(0) ), ex_reg_inst(19,15)) val ex_new_vl = ex_new_vtype.vl(ex_avl, csr.io.vector.get.vconfig.vl, false.B, false.B, false.B) val ex_new_vconfig = Wire(new VConfig) ex_new_vconfig.vtype := ex_new_vtype ex_new_vconfig.vl := ex_new_vl (Some(ex_new_vl), Some(ex_new_vconfig)) } else { (None, None) } val alu = Module(new ALU) alu.io.dw := ex_ctrl.alu_dw alu.io.fn := ex_ctrl.alu_fn alu.io.in2 := ex_op2.asUInt alu.io.in1 := ex_op1.asUInt // multiplier and divider val div = Module(new MulDiv(if (pipelinedMul) mulDivParams.copy(mulUnroll = 0) else mulDivParams, width = xLen)) div.io.req.valid := ex_reg_valid && ex_ctrl.div div.io.req.bits.dw := ex_ctrl.alu_dw div.io.req.bits.fn := ex_ctrl.alu_fn div.io.req.bits.in1 := ex_rs(0) div.io.req.bits.in2 := ex_rs(1) div.io.req.bits.tag := ex_waddr val mul = pipelinedMul.option { val m = Module(new PipelinedMultiplier(xLen, 2)) m.io.req.valid := ex_reg_valid && ex_ctrl.mul m.io.req.bits := div.io.req.bits m } ex_reg_valid := !ctrl_killd ex_reg_replay := !take_pc && ibuf.io.inst(0).valid && ibuf.io.inst(0).bits.replay ex_reg_xcpt := !ctrl_killd && id_xcpt ex_reg_xcpt_interrupt := !take_pc && ibuf.io.inst(0).valid && csr.io.interrupt when (!ctrl_killd) { ex_ctrl 
:= id_ctrl ex_reg_rvc := ibuf.io.inst(0).bits.rvc ex_ctrl.csr := id_csr when (id_ctrl.fence && id_fence_succ === 0.U) { id_reg_pause := true.B } when (id_fence_next) { id_reg_fence := true.B } when (id_xcpt) { // pass PC down ALU writeback pipeline for badaddr ex_ctrl.alu_fn := FN_ADD ex_ctrl.alu_dw := DW_XPR ex_ctrl.sel_alu1 := A1_RS1 // badaddr := instruction ex_ctrl.sel_alu2 := A2_ZERO when (id_xcpt1.asUInt.orR) { // badaddr := PC+2 ex_ctrl.sel_alu1 := A1_PC ex_ctrl.sel_alu2 := A2_SIZE ex_reg_rvc := true.B } when (bpu.io.xcpt_if || id_xcpt0.asUInt.orR) { // badaddr := PC ex_ctrl.sel_alu1 := A1_PC ex_ctrl.sel_alu2 := A2_ZERO } } ex_reg_flush_pipe := id_ctrl.fence_i || id_csr_flush ex_reg_load_use := id_load_use ex_reg_hls := usingHypervisor.B && id_system_insn && id_ctrl.mem_cmd.isOneOf(M_XRD, M_XWR, M_HLVX) ex_reg_mem_size := Mux(usingHypervisor.B && id_system_insn, id_inst(0)(27, 26), id_inst(0)(13, 12)) when (id_ctrl.mem_cmd.isOneOf(M_SFENCE, M_HFENCEV, M_HFENCEG, M_FLUSH_ALL)) { ex_reg_mem_size := Cat(id_raddr2 =/= 0.U, id_raddr1 =/= 0.U) } when (id_ctrl.mem_cmd === M_SFENCE && csr.io.status.v) { ex_ctrl.mem_cmd := M_HFENCEV } if (tile.dcache.flushOnFenceI) { when (id_ctrl.fence_i) { ex_reg_mem_size := 0.U } } for (i <- 0 until id_raddr.size) { val do_bypass = id_bypass_src(i).reduce(_||_) val bypass_src = PriorityEncoder(id_bypass_src(i)) ex_reg_rs_bypass(i) := do_bypass ex_reg_rs_lsb(i) := bypass_src when (id_ren(i) && !do_bypass) { ex_reg_rs_lsb(i) := id_rs(i)(log2Ceil(bypass_sources.size)-1, 0) ex_reg_rs_msb(i) := id_rs(i) >> log2Ceil(bypass_sources.size) } } when (id_illegal_insn || id_virtual_insn) { val inst = Mux(ibuf.io.inst(0).bits.rvc, id_raw_inst(0)(15, 0), id_raw_inst(0)) ex_reg_rs_bypass(0) := false.B ex_reg_rs_lsb(0) := inst(log2Ceil(bypass_sources.size)-1, 0) ex_reg_rs_msb(0) := inst >> log2Ceil(bypass_sources.size) } } when (!ctrl_killd || csr.io.interrupt || ibuf.io.inst(0).bits.replay) { ex_reg_cause := id_cause ex_reg_inst := id_inst(0) ex_reg_raw_inst := id_raw_inst(0) ex_reg_pc := ibuf.io.pc ex_reg_btb_resp := ibuf.io.btb_resp ex_reg_wphit := bpu.io.bpwatch.map { bpw => bpw.ivalid(0) } ex_reg_set_vconfig := id_set_vconfig && !id_xcpt } // replay inst in ex stage? 
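// An EX-stage instruction is replayed (replay_ex below) when it was already marked for replay
// in decode, when a structural resource it needs (D$ request port, divider, or vector unit) is
// not ready, or when it consumes the result of a load that missed in the D$ (replay_ex_load_use).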
val ex_pc_valid = ex_reg_valid || ex_reg_replay || ex_reg_xcpt_interrupt val wb_dcache_miss = wb_ctrl.mem && !io.dmem.resp.valid val replay_ex_structural = ex_ctrl.mem && !io.dmem.req.ready || ex_ctrl.div && !div.io.req.ready || ex_ctrl.vec && !io.vector.map(_.ex.ready).getOrElse(true.B) val replay_ex_load_use = wb_dcache_miss && ex_reg_load_use val replay_ex = ex_reg_replay || (ex_reg_valid && (replay_ex_structural || replay_ex_load_use)) val ctrl_killx = take_pc_mem_wb || replay_ex || !ex_reg_valid // detect 2-cycle load-use delay for LB/LH/SC val ex_slow_bypass = ex_ctrl.mem_cmd === M_XSC || ex_reg_mem_size < 2.U val ex_sfence = usingVM.B && ex_ctrl.mem && (ex_ctrl.mem_cmd === M_SFENCE || ex_ctrl.mem_cmd === M_HFENCEV || ex_ctrl.mem_cmd === M_HFENCEG) val (ex_xcpt, ex_cause) = checkExceptions(List( (ex_reg_xcpt_interrupt || ex_reg_xcpt, ex_reg_cause))) val exCoverCauses = idCoverCauses coverExceptions(ex_xcpt, ex_cause, "EXECUTE", exCoverCauses) // memory stage val mem_pc_valid = mem_reg_valid || mem_reg_replay || mem_reg_xcpt_interrupt val mem_br_target = mem_reg_pc.asSInt + Mux(mem_ctrl.branch && mem_br_taken, ImmGen(IMM_SB, mem_reg_inst), Mux(mem_ctrl.jal, ImmGen(IMM_UJ, mem_reg_inst), Mux(mem_reg_rvc, 2.S, 4.S))) val mem_npc = (Mux(mem_ctrl.jalr || mem_reg_sfence, encodeVirtualAddress(mem_reg_wdata, mem_reg_wdata).asSInt, mem_br_target) & (-2).S).asUInt val mem_wrong_npc = Mux(ex_pc_valid, mem_npc =/= ex_reg_pc, Mux(ibuf.io.inst(0).valid || ibuf.io.imem.valid, mem_npc =/= ibuf.io.pc, true.B)) val mem_npc_misaligned = !csr.io.status.isa('c'-'a') && mem_npc(1) && !mem_reg_sfence val mem_int_wdata = Mux(!mem_reg_xcpt && (mem_ctrl.jalr ^ mem_npc_misaligned), mem_br_target, mem_reg_wdata.asSInt).asUInt val mem_cfi = mem_ctrl.branch || mem_ctrl.jalr || mem_ctrl.jal val mem_cfi_taken = (mem_ctrl.branch && mem_br_taken) || mem_ctrl.jalr || mem_ctrl.jal val mem_direction_misprediction = mem_ctrl.branch && mem_br_taken =/= (usingBTB.B && mem_reg_btb_resp.taken) val mem_misprediction = if (usingBTB) mem_wrong_npc else mem_cfi_taken take_pc_mem := mem_reg_valid && !mem_reg_xcpt && (mem_misprediction || mem_reg_sfence) mem_reg_valid := !ctrl_killx mem_reg_replay := !take_pc_mem_wb && replay_ex mem_reg_xcpt := !ctrl_killx && ex_xcpt mem_reg_xcpt_interrupt := !take_pc_mem_wb && ex_reg_xcpt_interrupt // on pipeline flushes, cause mem_npc to hold the sequential npc, which // will drive the W-stage npc mux when (mem_reg_valid && mem_reg_flush_pipe) { mem_reg_sfence := false.B }.elsewhen (ex_pc_valid) { mem_ctrl := ex_ctrl mem_reg_rvc := ex_reg_rvc mem_reg_load := ex_ctrl.mem && isRead(ex_ctrl.mem_cmd) mem_reg_store := ex_ctrl.mem && isWrite(ex_ctrl.mem_cmd) mem_reg_sfence := ex_sfence mem_reg_btb_resp := ex_reg_btb_resp mem_reg_flush_pipe := ex_reg_flush_pipe mem_reg_slow_bypass := ex_slow_bypass mem_reg_wphit := ex_reg_wphit mem_reg_set_vconfig := ex_reg_set_vconfig mem_reg_cause := ex_cause mem_reg_inst := ex_reg_inst mem_reg_raw_inst := ex_reg_raw_inst mem_reg_mem_size := ex_reg_mem_size mem_reg_hls_or_dv := io.dmem.req.bits.dv mem_reg_pc := ex_reg_pc // IDecode ensured they are 1H mem_reg_wdata := Mux(ex_reg_set_vconfig, ex_new_vl.getOrElse(alu.io.out), alu.io.out) mem_br_taken := alu.io.cmp_out when (ex_ctrl.rxs2 && (ex_ctrl.mem || ex_ctrl.rocc || ex_sfence)) { val size = Mux(ex_ctrl.rocc, log2Ceil(xLen/8).U, ex_reg_mem_size) mem_reg_rs2 := new StoreGen(size, 0.U, ex_rs(1), coreDataBytes).data } if (usingVector) { when (ex_reg_set_vconfig) { mem_reg_rs2 := ex_new_vconfig.get.asUInt } } when 
(ex_ctrl.jalr && csr.io.status.debug) { // flush I$ on D-mode JALR to effect uncached fetch without D$ flush mem_ctrl.fence_i := true.B mem_reg_flush_pipe := true.B } } val mem_breakpoint = (mem_reg_load && bpu.io.xcpt_ld) || (mem_reg_store && bpu.io.xcpt_st) val mem_debug_breakpoint = (mem_reg_load && bpu.io.debug_ld) || (mem_reg_store && bpu.io.debug_st) val (mem_ldst_xcpt, mem_ldst_cause) = checkExceptions(List( (mem_debug_breakpoint, CSR.debugTriggerCause.U), (mem_breakpoint, Causes.breakpoint.U))) val (mem_xcpt, mem_cause) = checkExceptions(List( (mem_reg_xcpt_interrupt || mem_reg_xcpt, mem_reg_cause), (mem_reg_valid && mem_npc_misaligned, Causes.misaligned_fetch.U), (mem_reg_valid && mem_ldst_xcpt, mem_ldst_cause))) val memCoverCauses = (exCoverCauses ++ List( (CSR.debugTriggerCause, "DEBUG_TRIGGER"), (Causes.breakpoint, "BREAKPOINT"), (Causes.misaligned_fetch, "MISALIGNED_FETCH") )).distinct coverExceptions(mem_xcpt, mem_cause, "MEMORY", memCoverCauses) val dcache_kill_mem = mem_reg_valid && mem_ctrl.wxd && io.dmem.replay_next // structural hazard on writeback port val fpu_kill_mem = mem_reg_valid && mem_ctrl.fp && io.fpu.nack_mem val vec_kill_mem = mem_reg_valid && mem_ctrl.mem && io.vector.map(_.mem.block_mem).getOrElse(false.B) val vec_kill_all = mem_reg_valid && io.vector.map(_.mem.block_all).getOrElse(false.B) val replay_mem = dcache_kill_mem || mem_reg_replay || fpu_kill_mem || vec_kill_mem || vec_kill_all val killm_common = dcache_kill_mem || take_pc_wb || mem_reg_xcpt || !mem_reg_valid div.io.kill := killm_common && RegNext(div.io.req.fire) val ctrl_killm = killm_common || mem_xcpt || fpu_kill_mem || vec_kill_mem // writeback stage wb_reg_valid := !ctrl_killm wb_reg_replay := replay_mem && !take_pc_wb wb_reg_xcpt := mem_xcpt && !take_pc_wb && !io.vector.map(_.mem.block_all).getOrElse(false.B) wb_reg_flush_pipe := !ctrl_killm && mem_reg_flush_pipe when (mem_pc_valid) { wb_ctrl := mem_ctrl wb_reg_sfence := mem_reg_sfence wb_reg_wdata := Mux(!mem_reg_xcpt && mem_ctrl.fp && mem_ctrl.wxd, io.fpu.toint_data, mem_int_wdata) when (mem_ctrl.rocc || mem_reg_sfence || mem_reg_set_vconfig) { wb_reg_rs2 := mem_reg_rs2 } wb_reg_cause := mem_cause wb_reg_inst := mem_reg_inst wb_reg_raw_inst := mem_reg_raw_inst wb_reg_mem_size := mem_reg_mem_size wb_reg_hls_or_dv := mem_reg_hls_or_dv wb_reg_hfence_v := mem_ctrl.mem_cmd === M_HFENCEV wb_reg_hfence_g := mem_ctrl.mem_cmd === M_HFENCEG wb_reg_pc := mem_reg_pc wb_reg_wphit := mem_reg_wphit | bpu.io.bpwatch.map { bpw => (bpw.rvalid(0) && mem_reg_load) || (bpw.wvalid(0) && mem_reg_store) } wb_reg_set_vconfig := mem_reg_set_vconfig } val (wb_xcpt, wb_cause) = checkExceptions(List( (wb_reg_xcpt, wb_reg_cause), (wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.pf.st, Causes.store_page_fault.U), (wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.pf.ld, Causes.load_page_fault.U), (wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.gf.st, Causes.store_guest_page_fault.U), (wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.gf.ld, Causes.load_guest_page_fault.U), (wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.ae.st, Causes.store_access.U), (wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.ae.ld, Causes.load_access.U), (wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.ma.st, Causes.misaligned_store.U), (wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.ma.ld, Causes.misaligned_load.U) )) val wbCoverCauses = List( (Causes.misaligned_store, "MISALIGNED_STORE"), (Causes.misaligned_load, "MISALIGNED_LOAD"), (Causes.store_access, "STORE_ACCESS"), (Causes.load_access, 
"LOAD_ACCESS") ) ++ (if(usingVM) List( (Causes.store_page_fault, "STORE_PAGE_FAULT"), (Causes.load_page_fault, "LOAD_PAGE_FAULT") ) else Nil) ++ (if (usingHypervisor) List( (Causes.store_guest_page_fault, "STORE_GUEST_PAGE_FAULT"), (Causes.load_guest_page_fault, "LOAD_GUEST_PAGE_FAULT"), ) else Nil) coverExceptions(wb_xcpt, wb_cause, "WRITEBACK", wbCoverCauses) val wb_pc_valid = wb_reg_valid || wb_reg_replay || wb_reg_xcpt val wb_wxd = wb_reg_valid && wb_ctrl.wxd val wb_set_sboard = wb_ctrl.div || wb_dcache_miss || wb_ctrl.rocc || wb_ctrl.vec val replay_wb_common = io.dmem.s2_nack || wb_reg_replay val replay_wb_rocc = wb_reg_valid && wb_ctrl.rocc && !io.rocc.cmd.ready val replay_wb_csr: Bool = wb_reg_valid && csr.io.rw_stall val replay_wb_vec = wb_reg_valid && io.vector.map(_.wb.replay).getOrElse(false.B) val replay_wb = replay_wb_common || replay_wb_rocc || replay_wb_csr || replay_wb_vec take_pc_wb := replay_wb || wb_xcpt || csr.io.eret || wb_reg_flush_pipe // writeback arbitration val dmem_resp_xpu = !io.dmem.resp.bits.tag(0).asBool val dmem_resp_fpu = io.dmem.resp.bits.tag(0).asBool val dmem_resp_waddr = io.dmem.resp.bits.tag(5, 1) val dmem_resp_valid = io.dmem.resp.valid && io.dmem.resp.bits.has_data val dmem_resp_replay = dmem_resp_valid && io.dmem.resp.bits.replay class LLWB extends Bundle { val data = UInt(xLen.W) val tag = UInt(5.W) } val ll_arb = Module(new Arbiter(new LLWB, 3)) // div, rocc, vec ll_arb.io.in.foreach(_.valid := false.B) ll_arb.io.in.foreach(_.bits := DontCare) val ll_wdata = WireInit(ll_arb.io.out.bits.data) val ll_waddr = WireInit(ll_arb.io.out.bits.tag) val ll_wen = WireInit(ll_arb.io.out.fire) ll_arb.io.out.ready := !wb_wxd div.io.resp.ready := ll_arb.io.in(0).ready ll_arb.io.in(0).valid := div.io.resp.valid ll_arb.io.in(0).bits.data := div.io.resp.bits.data ll_arb.io.in(0).bits.tag := div.io.resp.bits.tag if (usingRoCC) { io.rocc.resp.ready := ll_arb.io.in(1).ready ll_arb.io.in(1).valid := io.rocc.resp.valid ll_arb.io.in(1).bits.data := io.rocc.resp.bits.data ll_arb.io.in(1).bits.tag := io.rocc.resp.bits.rd } else { // tie off RoCC io.rocc.resp.ready := false.B io.rocc.mem.req.ready := false.B } io.vector.map { v => v.resp.ready := Mux(v.resp.bits.fp, !(dmem_resp_valid && dmem_resp_fpu), ll_arb.io.in(2).ready) ll_arb.io.in(2).valid := v.resp.valid && !v.resp.bits.fp ll_arb.io.in(2).bits.data := v.resp.bits.data ll_arb.io.in(2).bits.tag := v.resp.bits.rd } // Dont care mem since not all RoCC need accessing memory io.rocc.mem := DontCare when (dmem_resp_replay && dmem_resp_xpu) { ll_arb.io.out.ready := false.B ll_waddr := dmem_resp_waddr ll_wen := true.B } val wb_valid = wb_reg_valid && !replay_wb && !wb_xcpt val wb_wen = wb_valid && wb_ctrl.wxd val rf_wen = wb_wen || ll_wen val rf_waddr = Mux(ll_wen, ll_waddr, wb_waddr) val rf_wdata = Mux(dmem_resp_valid && dmem_resp_xpu, io.dmem.resp.bits.data(xLen-1, 0), Mux(ll_wen, ll_wdata, Mux(wb_ctrl.csr =/= CSR.N, csr.io.rw.rdata, Mux(wb_ctrl.mul, mul.map(_.io.resp.bits.data).getOrElse(wb_reg_wdata), wb_reg_wdata)))) when (rf_wen) { rf.write(rf_waddr, rf_wdata) } // hook up control/status regfile csr.io.ungated_clock := clock csr.io.decode(0).inst := id_inst(0) csr.io.exception := wb_xcpt csr.io.cause := wb_cause csr.io.retire := wb_valid csr.io.inst(0) := (if (usingCompressed) Cat(Mux(wb_reg_raw_inst(1, 0).andR, wb_reg_inst >> 16, 0.U), wb_reg_raw_inst(15, 0)) else wb_reg_inst) csr.io.interrupts := io.interrupts csr.io.hartid := io.hartid io.fpu.fcsr_rm := csr.io.fcsr_rm val vector_fcsr_flags = 
io.vector.map(_.set_fflags.bits).getOrElse(0.U(5.W)) val vector_fcsr_flags_valid = io.vector.map(_.set_fflags.valid).getOrElse(false.B) csr.io.fcsr_flags.valid := io.fpu.fcsr_flags.valid | vector_fcsr_flags_valid csr.io.fcsr_flags.bits := (io.fpu.fcsr_flags.bits & Fill(5, io.fpu.fcsr_flags.valid)) | (vector_fcsr_flags & Fill(5, vector_fcsr_flags_valid)) io.fpu.time := csr.io.time(31,0) io.fpu.hartid := io.hartid csr.io.rocc_interrupt := io.rocc.interrupt csr.io.pc := wb_reg_pc val tval_dmem_addr = !wb_reg_xcpt val tval_any_addr = tval_dmem_addr || wb_reg_cause.isOneOf(Causes.breakpoint.U, Causes.fetch_access.U, Causes.fetch_page_fault.U, Causes.fetch_guest_page_fault.U) val tval_inst = wb_reg_cause === Causes.illegal_instruction.U val tval_valid = wb_xcpt && (tval_any_addr || tval_inst) csr.io.gva := wb_xcpt && (tval_any_addr && csr.io.status.v || tval_dmem_addr && wb_reg_hls_or_dv) csr.io.tval := Mux(tval_valid, encodeVirtualAddress(wb_reg_wdata, wb_reg_wdata), 0.U) val (htval, mhtinst_read_pseudo) = { val htval_valid_imem = wb_reg_xcpt && wb_reg_cause === Causes.fetch_guest_page_fault.U val htval_imem = Mux(htval_valid_imem, io.imem.gpa.bits, 0.U) assert(!htval_valid_imem || io.imem.gpa.valid) val htval_valid_dmem = wb_xcpt && tval_dmem_addr && io.dmem.s2_xcpt.gf.asUInt.orR && !io.dmem.s2_xcpt.pf.asUInt.orR val htval_dmem = Mux(htval_valid_dmem, io.dmem.s2_gpa, 0.U) val htval = (htval_dmem | htval_imem) >> hypervisorExtraAddrBits // read pseudoinstruction if a guest-page fault is caused by an implicit memory access for VS-stage address translation val mhtinst_read_pseudo = (io.imem.gpa_is_pte && htval_valid_imem) || (io.dmem.s2_gpa_is_pte && htval_valid_dmem) (htval, mhtinst_read_pseudo) } csr.io.vector.foreach { v => v.set_vconfig.valid := wb_reg_set_vconfig && wb_reg_valid v.set_vconfig.bits := wb_reg_rs2.asTypeOf(new VConfig) v.set_vs_dirty := wb_valid && wb_ctrl.vec v.set_vstart.valid := wb_valid && wb_reg_set_vconfig v.set_vstart.bits := 0.U } io.vector.foreach { v => when (v.wb.retire || v.wb.xcpt || wb_ctrl.vec) { csr.io.pc := v.wb.pc csr.io.retire := v.wb.retire csr.io.inst(0) := v.wb.inst when (v.wb.xcpt && !wb_reg_xcpt) { wb_xcpt := true.B wb_cause := v.wb.cause csr.io.tval := v.wb.tval } } v.wb.store_pending := io.dmem.store_pending v.wb.vxrm := csr.io.vector.get.vxrm v.wb.frm := csr.io.fcsr_rm csr.io.vector.get.set_vxsat := v.set_vxsat when (v.set_vconfig.valid) { csr.io.vector.get.set_vconfig.valid := true.B csr.io.vector.get.set_vconfig.bits := v.set_vconfig.bits } when (v.set_vstart.valid) { csr.io.vector.get.set_vstart.valid := true.B csr.io.vector.get.set_vstart.bits := v.set_vstart.bits } } csr.io.htval := htval csr.io.mhtinst_read_pseudo := mhtinst_read_pseudo io.ptw.ptbr := csr.io.ptbr io.ptw.hgatp := csr.io.hgatp io.ptw.vsatp := csr.io.vsatp (io.ptw.customCSRs.csrs zip csr.io.customCSRs).map { case (lhs, rhs) => lhs <> rhs } io.ptw.status := csr.io.status io.ptw.hstatus := csr.io.hstatus io.ptw.gstatus := csr.io.gstatus io.ptw.pmp := csr.io.pmp csr.io.rw.addr := wb_reg_inst(31,20) csr.io.rw.cmd := CSR.maskCmd(wb_reg_valid, wb_ctrl.csr) csr.io.rw.wdata := wb_reg_wdata io.rocc.csrs <> csr.io.roccCSRs io.trace.time := csr.io.time io.trace.insns := csr.io.trace if (rocketParams.debugROB.isDefined) { val sz = rocketParams.debugROB.get.size if (sz < 1) { // use unsynthesizable ROB val csr_trace_with_wdata = WireInit(csr.io.trace(0)) csr_trace_with_wdata.wdata.get := rf_wdata val should_wb = WireInit((wb_ctrl.wfd || (wb_ctrl.wxd && wb_waddr =/= 0.U)) && 
!csr.io.trace(0).exception) val has_wb = WireInit(wb_ctrl.wxd && wb_wen && !wb_set_sboard) val wb_addr = WireInit(wb_waddr + Mux(wb_ctrl.wfd, 32.U, 0.U)) io.vector.foreach { v => when (v.wb.retire) { should_wb := v.wb.rob_should_wb has_wb := false.B wb_addr := Cat(v.wb.rob_should_wb_fp, csr_trace_with_wdata.insn(11,7)) }} DebugROB.pushTrace(clock, reset, io.hartid, csr_trace_with_wdata, should_wb, has_wb, wb_addr) io.trace.insns(0) := DebugROB.popTrace(clock, reset, io.hartid) DebugROB.pushWb(clock, reset, io.hartid, ll_wen, rf_waddr, rf_wdata) } else { // synthesizable ROB (no FPRs) require(!usingVector, "Synthesizable ROB does not support vector implementations") val csr_trace_with_wdata = WireInit(csr.io.trace(0)) csr_trace_with_wdata.wdata.get := rf_wdata val debug_rob = Module(new HardDebugROB(sz, 32)) debug_rob.io.i_insn := csr_trace_with_wdata debug_rob.io.should_wb := (wb_ctrl.wfd || (wb_ctrl.wxd && wb_waddr =/= 0.U)) && !csr.io.trace(0).exception debug_rob.io.has_wb := wb_ctrl.wxd && wb_wen && !wb_set_sboard debug_rob.io.tag := wb_waddr + Mux(wb_ctrl.wfd, 32.U, 0.U) debug_rob.io.wb_val := ll_wen debug_rob.io.wb_tag := rf_waddr debug_rob.io.wb_data := rf_wdata io.trace.insns(0) := debug_rob.io.o_insn } } else { io.trace.insns := csr.io.trace } for (((iobpw, wphit), bp) <- io.bpwatch zip wb_reg_wphit zip csr.io.bp) { iobpw.valid(0) := wphit iobpw.action := bp.control.action // tie off bpwatch valids iobpw.rvalid.foreach(_ := false.B) iobpw.wvalid.foreach(_ := false.B) iobpw.ivalid.foreach(_ := false.B) } val hazard_targets = Seq((id_ctrl.rxs1 && id_raddr1 =/= 0.U, id_raddr1), (id_ctrl.rxs2 && id_raddr2 =/= 0.U, id_raddr2), (id_ctrl.wxd && id_waddr =/= 0.U, id_waddr)) val fp_hazard_targets = Seq((io.fpu.dec.ren1, id_raddr1), (io.fpu.dec.ren2, id_raddr2), (io.fpu.dec.ren3, id_raddr3), (io.fpu.dec.wen, id_waddr)) val sboard = new Scoreboard(32, true) sboard.clear(ll_wen, ll_waddr) def id_sboard_clear_bypass(r: UInt) = { // ll_waddr arrives late when D$ has ECC, so reshuffle the hazard check if (!tileParams.dcache.get.dataECC.isDefined) ll_wen && ll_waddr === r else div.io.resp.fire && div.io.resp.bits.tag === r || dmem_resp_replay && dmem_resp_xpu && dmem_resp_waddr === r } val id_sboard_hazard = checkHazards(hazard_targets, rd => sboard.read(rd) && !id_sboard_clear_bypass(rd)) sboard.set(wb_set_sboard && wb_wen, wb_waddr) // stall for RAW/WAW hazards on CSRs, loads, AMOs, and mul/div in execute stage. val ex_cannot_bypass = ex_ctrl.csr =/= CSR.N || ex_ctrl.jalr || ex_ctrl.mem || ex_ctrl.mul || ex_ctrl.div || ex_ctrl.fp || ex_ctrl.rocc || ex_ctrl.vec val data_hazard_ex = ex_ctrl.wxd && checkHazards(hazard_targets, _ === ex_waddr) val fp_data_hazard_ex = id_ctrl.fp && ex_ctrl.wfd && checkHazards(fp_hazard_targets, _ === ex_waddr) val id_ex_hazard = ex_reg_valid && (data_hazard_ex && ex_cannot_bypass || fp_data_hazard_ex) // stall for RAW/WAW hazards on CSRs, LB/LH, and mul/div in memory stage. 
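// checkHazards (defined near the end of this file) ORs, over the decode-stage register operands
// in hazard_targets, the cases where an operand is live and its index matches the in-flight
// instruction's write address, e.g. checkHazards(hazard_targets, _ === mem_waddr).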
val mem_mem_cmd_bh = if (fastLoadWord) (!fastLoadByte).B && mem_reg_slow_bypass else true.B val mem_cannot_bypass = mem_ctrl.csr =/= CSR.N || mem_ctrl.mem && mem_mem_cmd_bh || mem_ctrl.mul || mem_ctrl.div || mem_ctrl.fp || mem_ctrl.rocc || mem_ctrl.vec val data_hazard_mem = mem_ctrl.wxd && checkHazards(hazard_targets, _ === mem_waddr) val fp_data_hazard_mem = id_ctrl.fp && mem_ctrl.wfd && checkHazards(fp_hazard_targets, _ === mem_waddr) val id_mem_hazard = mem_reg_valid && (data_hazard_mem && mem_cannot_bypass || fp_data_hazard_mem) id_load_use := mem_reg_valid && data_hazard_mem && mem_ctrl.mem val id_vconfig_hazard = id_ctrl.vec && ( (ex_reg_valid && ex_reg_set_vconfig) || (mem_reg_valid && mem_reg_set_vconfig) || (wb_reg_valid && wb_reg_set_vconfig)) // stall for RAW/WAW hazards on load/AMO misses and mul/div in writeback. val data_hazard_wb = wb_ctrl.wxd && checkHazards(hazard_targets, _ === wb_waddr) val fp_data_hazard_wb = id_ctrl.fp && wb_ctrl.wfd && checkHazards(fp_hazard_targets, _ === wb_waddr) val id_wb_hazard = wb_reg_valid && (data_hazard_wb && wb_set_sboard || fp_data_hazard_wb) val id_stall_fpu = if (usingFPU) { val fp_sboard = new Scoreboard(32) fp_sboard.set(((wb_dcache_miss || wb_ctrl.vec) && wb_ctrl.wfd || io.fpu.sboard_set) && wb_valid, wb_waddr) val v_ll = io.vector.map(v => v.resp.fire && v.resp.bits.fp).getOrElse(false.B) fp_sboard.clear((dmem_resp_replay && dmem_resp_fpu) || v_ll, io.fpu.ll_resp_tag) fp_sboard.clear(io.fpu.sboard_clr, io.fpu.sboard_clra) checkHazards(fp_hazard_targets, fp_sboard.read _) } else false.B val dcache_blocked = { // speculate that a blocked D$ will unblock the cycle after a Grant val blocked = Reg(Bool()) blocked := !io.dmem.req.ready && io.dmem.clock_enabled && !io.dmem.perf.grant && (blocked || io.dmem.req.valid || io.dmem.s2_nack) blocked && !io.dmem.perf.grant } val rocc_blocked = Reg(Bool()) rocc_blocked := !wb_xcpt && !io.rocc.cmd.ready && (io.rocc.cmd.valid || rocc_blocked) val ctrl_stalld = id_ex_hazard || id_mem_hazard || id_wb_hazard || id_sboard_hazard || id_vconfig_hazard || csr.io.singleStep && (ex_reg_valid || mem_reg_valid || wb_reg_valid) || id_csr_en && csr.io.decode(0).fp_csr && !io.fpu.fcsr_rdy || id_csr_en && csr.io.decode(0).vector_csr && id_vec_busy || id_ctrl.fp && id_stall_fpu || id_ctrl.mem && dcache_blocked || // reduce activity during D$ misses id_ctrl.rocc && rocc_blocked || // reduce activity while RoCC is busy id_ctrl.div && (!(div.io.req.ready || (div.io.resp.valid && !wb_wxd)) || div.io.req.valid) || // reduce odds of replay !clock_en || id_do_fence || csr.io.csr_stall || id_reg_pause || io.traceStall ctrl_killd := !ibuf.io.inst(0).valid || ibuf.io.inst(0).bits.replay || take_pc_mem_wb || ctrl_stalld || csr.io.interrupt io.imem.req.valid := take_pc io.imem.req.bits.speculative := !take_pc_wb io.imem.req.bits.pc := Mux(wb_xcpt || csr.io.eret, csr.io.evec, // exception or [m|s]ret Mux(replay_wb, wb_reg_pc, // replay mem_npc)) // flush or branch misprediction io.imem.flush_icache := wb_reg_valid && wb_ctrl.fence_i && !io.dmem.s2_nack io.imem.might_request := { imem_might_request_reg := ex_pc_valid || mem_pc_valid || io.ptw.customCSRs.disableICacheClockGate || io.vector.map(_.trap_check_busy).getOrElse(false.B) imem_might_request_reg } io.imem.progress := RegNext(wb_reg_valid && !replay_wb_common) io.imem.sfence.valid := wb_reg_valid && wb_reg_sfence io.imem.sfence.bits.rs1 := wb_reg_mem_size(0) io.imem.sfence.bits.rs2 := wb_reg_mem_size(1) io.imem.sfence.bits.addr := wb_reg_wdata io.imem.sfence.bits.asid := 
wb_reg_rs2 io.imem.sfence.bits.hv := wb_reg_hfence_v io.imem.sfence.bits.hg := wb_reg_hfence_g io.ptw.sfence := io.imem.sfence ibuf.io.inst(0).ready := !ctrl_stalld io.imem.btb_update.valid := mem_reg_valid && !take_pc_wb && mem_wrong_npc && (!mem_cfi || mem_cfi_taken) io.imem.btb_update.bits.isValid := mem_cfi io.imem.btb_update.bits.cfiType := Mux((mem_ctrl.jal || mem_ctrl.jalr) && mem_waddr(0), CFIType.call, Mux(mem_ctrl.jalr && (mem_reg_inst(19,15) & regAddrMask.U) === BitPat("b00?01"), CFIType.ret, Mux(mem_ctrl.jal || mem_ctrl.jalr, CFIType.jump, CFIType.branch))) io.imem.btb_update.bits.target := io.imem.req.bits.pc io.imem.btb_update.bits.br_pc := (if (usingCompressed) mem_reg_pc + Mux(mem_reg_rvc, 0.U, 2.U) else mem_reg_pc) io.imem.btb_update.bits.pc := ~(~io.imem.btb_update.bits.br_pc | (coreInstBytes*fetchWidth-1).U) io.imem.btb_update.bits.prediction := mem_reg_btb_resp io.imem.btb_update.bits.taken := DontCare io.imem.bht_update.valid := mem_reg_valid && !take_pc_wb io.imem.bht_update.bits.pc := io.imem.btb_update.bits.pc io.imem.bht_update.bits.taken := mem_br_taken io.imem.bht_update.bits.mispredict := mem_wrong_npc io.imem.bht_update.bits.branch := mem_ctrl.branch io.imem.bht_update.bits.prediction := mem_reg_btb_resp.bht // Connect RAS in Frontend io.imem.ras_update := DontCare io.fpu.valid := !ctrl_killd && id_ctrl.fp io.fpu.killx := ctrl_killx io.fpu.killm := killm_common io.fpu.inst := id_inst(0) io.fpu.fromint_data := ex_rs(0) io.fpu.ll_resp_val := dmem_resp_valid && dmem_resp_fpu io.fpu.ll_resp_data := (if (minFLen == 32) io.dmem.resp.bits.data_word_bypass else io.dmem.resp.bits.data) io.fpu.ll_resp_type := io.dmem.resp.bits.size io.fpu.ll_resp_tag := dmem_resp_waddr io.fpu.keep_clock_enabled := io.ptw.customCSRs.disableCoreClockGate io.fpu.v_sew := csr.io.vector.map(_.vconfig.vtype.vsew).getOrElse(0.U) io.vector.map { v => when (!(dmem_resp_valid && dmem_resp_fpu)) { io.fpu.ll_resp_val := v.resp.valid && v.resp.bits.fp io.fpu.ll_resp_data := v.resp.bits.data io.fpu.ll_resp_type := v.resp.bits.size io.fpu.ll_resp_tag := v.resp.bits.rd } } io.vector.foreach { v => v.ex.valid := ex_reg_valid && (ex_ctrl.vec || rocketParams.vector.get.issueVConfig.B && ex_reg_set_vconfig) && !ctrl_killx v.ex.inst := ex_reg_inst v.ex.vconfig := csr.io.vector.get.vconfig v.ex.vstart := Mux(mem_reg_valid && mem_ctrl.vec || wb_reg_valid && wb_ctrl.vec, 0.U, csr.io.vector.get.vstart) v.ex.rs1 := ex_rs(0) v.ex.rs2 := ex_rs(1) v.ex.pc := ex_reg_pc v.mem.frs1 := io.fpu.store_data v.killm := killm_common v.status := csr.io.status } io.dmem.req.valid := ex_reg_valid && ex_ctrl.mem val ex_dcache_tag = Cat(ex_waddr, ex_ctrl.fp) require(coreParams.dcacheReqTagBits >= ex_dcache_tag.getWidth) io.dmem.req.bits.tag := ex_dcache_tag io.dmem.req.bits.cmd := ex_ctrl.mem_cmd io.dmem.req.bits.size := ex_reg_mem_size io.dmem.req.bits.signed := !Mux(ex_reg_hls, ex_reg_inst(20), ex_reg_inst(14)) io.dmem.req.bits.phys := false.B io.dmem.req.bits.addr := encodeVirtualAddress(ex_rs(0), alu.io.adder_out) io.dmem.req.bits.idx.foreach(_ := io.dmem.req.bits.addr) io.dmem.req.bits.dprv := Mux(ex_reg_hls, csr.io.hstatus.spvp, csr.io.status.dprv) io.dmem.req.bits.dv := ex_reg_hls || csr.io.status.dv io.dmem.req.bits.no_resp := !isRead(ex_ctrl.mem_cmd) || (!ex_ctrl.fp && ex_waddr === 0.U) io.dmem.req.bits.no_alloc := DontCare io.dmem.req.bits.no_xcpt := DontCare io.dmem.req.bits.data := DontCare io.dmem.req.bits.mask := DontCare io.dmem.s1_data.data := (if (fLen == 0) mem_reg_rs2 else Mux(mem_ctrl.fp, Fill(coreDataBits / 
fLen, io.fpu.store_data), mem_reg_rs2)) io.dmem.s1_data.mask := DontCare io.dmem.s1_kill := killm_common || mem_ldst_xcpt || fpu_kill_mem || vec_kill_mem io.dmem.s2_kill := false.B // don't let D$ go to sleep if we're probably going to use it soon io.dmem.keep_clock_enabled := ibuf.io.inst(0).valid && id_ctrl.mem && !csr.io.csr_stall io.rocc.cmd.valid := wb_reg_valid && wb_ctrl.rocc && !replay_wb_common io.rocc.exception := wb_xcpt && csr.io.status.xs.orR io.rocc.cmd.bits.status := csr.io.status io.rocc.cmd.bits.inst := wb_reg_inst.asTypeOf(new RoCCInstruction()) io.rocc.cmd.bits.rs1 := wb_reg_wdata io.rocc.cmd.bits.rs2 := wb_reg_rs2 // gate the clock val unpause = csr.io.time(rocketParams.lgPauseCycles-1, 0) === 0.U || csr.io.inhibit_cycle || io.dmem.perf.release || take_pc when (unpause) { id_reg_pause := false.B } io.cease := csr.io.status.cease && !clock_en_reg io.wfi := csr.io.status.wfi if (rocketParams.clockGate) { long_latency_stall := csr.io.csr_stall || io.dmem.perf.blocked || id_reg_pause && !unpause clock_en := clock_en_reg || ex_pc_valid || (!long_latency_stall && io.imem.resp.valid) clock_en_reg := ex_pc_valid || mem_pc_valid || wb_pc_valid || // instruction in flight io.ptw.customCSRs.disableCoreClockGate || // chicken bit !div.io.req.ready || // mul/div in flight usingFPU.B && !io.fpu.fcsr_rdy || // long-latency FPU in flight io.dmem.replay_next || // long-latency load replaying (!long_latency_stall && (ibuf.io.inst(0).valid || io.imem.resp.valid)) // instruction pending assert(!(ex_pc_valid || mem_pc_valid || wb_pc_valid) || clock_en) } // evaluate performance counters val icache_blocked = !(io.imem.resp.valid || RegNext(io.imem.resp.valid)) csr.io.counters foreach { c => c.inc := RegNext(perfEvents.evaluate(c.eventSel)) } val coreMonitorBundle = Wire(new CoreMonitorBundle(xLen, fLen)) coreMonitorBundle.clock := clock coreMonitorBundle.reset := reset coreMonitorBundle.hartid := io.hartid coreMonitorBundle.timer := csr.io.time(31,0) coreMonitorBundle.valid := csr.io.trace(0).valid && !csr.io.trace(0).exception coreMonitorBundle.pc := csr.io.trace(0).iaddr(vaddrBitsExtended-1, 0).sextTo(xLen) coreMonitorBundle.wrenx := wb_wen && !wb_set_sboard coreMonitorBundle.wrenf := false.B coreMonitorBundle.wrdst := wb_waddr coreMonitorBundle.wrdata := rf_wdata coreMonitorBundle.rd0src := wb_reg_inst(19,15) coreMonitorBundle.rd0val := RegNext(RegNext(ex_rs(0))) coreMonitorBundle.rd1src := wb_reg_inst(24,20) coreMonitorBundle.rd1val := RegNext(RegNext(ex_rs(1))) coreMonitorBundle.inst := csr.io.trace(0).insn coreMonitorBundle.excpt := csr.io.trace(0).exception coreMonitorBundle.priv_mode := csr.io.trace(0).priv if (enableCommitLog) { val t = csr.io.trace(0) val rd = wb_waddr val wfd = wb_ctrl.wfd val wxd = wb_ctrl.wxd val has_data = wb_wen && !wb_set_sboard when (t.valid && !t.exception) { when (wfd) { printf ("%d 0x%x (0x%x) f%d p%d 0xXXXXXXXXXXXXXXXX\n", t.priv, t.iaddr, t.insn, rd, rd+32.U) } .elsewhen (wxd && rd =/= 0.U && has_data) { printf ("%d 0x%x (0x%x) x%d 0x%x\n", t.priv, t.iaddr, t.insn, rd, rf_wdata) } .elsewhen (wxd && rd =/= 0.U && !has_data) { printf ("%d 0x%x (0x%x) x%d p%d 0xXXXXXXXXXXXXXXXX\n", t.priv, t.iaddr, t.insn, rd, rd) } .otherwise { printf ("%d 0x%x (0x%x)\n", t.priv, t.iaddr, t.insn) } } when (ll_wen && rf_waddr =/= 0.U) { printf ("x%d p%d 0x%x\n", rf_waddr, rf_waddr, rf_wdata) } } else { when (csr.io.trace(0).valid) { printf("C%d: %d [%d] pc=[%x] W[r%d=%x][%d] R[r%d=%x] R[r%d=%x] inst=[%x] DASM(%x)\n", io.hartid, coreMonitorBundle.timer, 
coreMonitorBundle.valid, coreMonitorBundle.pc, Mux(wb_ctrl.wxd || wb_ctrl.wfd, coreMonitorBundle.wrdst, 0.U), Mux(coreMonitorBundle.wrenx, coreMonitorBundle.wrdata, 0.U), coreMonitorBundle.wrenx, Mux(wb_ctrl.rxs1 || wb_ctrl.rfs1, coreMonitorBundle.rd0src, 0.U), Mux(wb_ctrl.rxs1 || wb_ctrl.rfs1, coreMonitorBundle.rd0val, 0.U), Mux(wb_ctrl.rxs2 || wb_ctrl.rfs2, coreMonitorBundle.rd1src, 0.U), Mux(wb_ctrl.rxs2 || wb_ctrl.rfs2, coreMonitorBundle.rd1val, 0.U), coreMonitorBundle.inst, coreMonitorBundle.inst) } } // CoreMonitorBundle for late latency writes val xrfWriteBundle = Wire(new CoreMonitorBundle(xLen, fLen)) xrfWriteBundle.clock := clock xrfWriteBundle.reset := reset xrfWriteBundle.hartid := io.hartid xrfWriteBundle.timer := csr.io.time(31,0) xrfWriteBundle.valid := false.B xrfWriteBundle.pc := 0.U xrfWriteBundle.wrdst := rf_waddr xrfWriteBundle.wrenx := rf_wen && !(csr.io.trace(0).valid && wb_wen && (wb_waddr === rf_waddr)) xrfWriteBundle.wrenf := false.B xrfWriteBundle.wrdata := rf_wdata xrfWriteBundle.rd0src := 0.U xrfWriteBundle.rd0val := 0.U xrfWriteBundle.rd1src := 0.U xrfWriteBundle.rd1val := 0.U xrfWriteBundle.inst := 0.U xrfWriteBundle.excpt := false.B xrfWriteBundle.priv_mode := csr.io.trace(0).priv if (rocketParams.haveSimTimeout) PlusArg.timeout( name = "max_core_cycles", docstring = "Kill the emulation after INT rdtime cycles. Off if 0." )(csr.io.time) } // leaving gated-clock domain val rocketImpl = withClock (gated_clock) { new RocketImpl } def checkExceptions(x: Seq[(Bool, UInt)]) = (WireInit(x.map(_._1).reduce(_||_)), WireInit(PriorityMux(x))) def coverExceptions(exceptionValid: Bool, cause: UInt, labelPrefix: String, coverCausesLabels: Seq[(Int, String)]): Unit = { for ((coverCause, label) <- coverCausesLabels) { property.cover(exceptionValid && (cause === coverCause.U), s"${labelPrefix}_${label}") } } def checkHazards(targets: Seq[(Bool, UInt)], cond: UInt => Bool) = targets.map(h => h._1 && cond(h._2)).reduce(_||_) def encodeVirtualAddress(a0: UInt, ea: UInt) = if (vaddrBitsExtended == vaddrBits) ea else { // efficient means to compress 64-bit VA into vaddrBits+1 bits // (VA is bad if VA(vaddrBits) != VA(vaddrBits-1)) val b = vaddrBitsExtended-1 val a = (a0 >> b).asSInt val msb = Mux(a === 0.S || a === -1.S, ea(b), !ea(b-1)) Cat(msb, ea(b-1, 0)) } class Scoreboard(n: Int, zero: Boolean = false) { def set(en: Bool, addr: UInt): Unit = update(en, _next | mask(en, addr)) def clear(en: Bool, addr: UInt): Unit = update(en, _next & ~mask(en, addr)) def read(addr: UInt): Bool = r(addr) def readBypassed(addr: UInt): Bool = _next(addr) private val _r = RegInit(0.U(n.W)) private val r = if (zero) (_r >> 1 << 1) else _r private var _next = r private var ens = false.B private def mask(en: Bool, addr: UInt) = Mux(en, 1.U << addr, 0.U) private def update(en: Bool, update: UInt) = { _next = update ens = ens || en when (ens) { _r := _next } } } } class RegFile(n: Int, w: Int, zero: Boolean = false) { val rf = Mem(n, UInt(w.W)) private def access(addr: UInt) = rf(~addr(log2Up(n)-1,0)) private val reads = ArrayBuffer[(UInt,UInt)]() private var canRead = true def read(addr: UInt) = { require(canRead) reads += addr -> Wire(UInt()) reads.last._2 := Mux(zero.B && addr === 0.U, 0.U, access(addr)) reads.last._2 } def write(addr: UInt, data: UInt) = { canRead = false when (addr =/= 0.U) { access(addr) := data for ((raddr, rdata) <- reads) when (addr === raddr) { rdata := data } } } } object ImmGen { def apply(sel: UInt, inst: UInt) = { val sign = Mux(sel === IMM_Z, 0.S, inst(31).asSInt) val 
b30_20 = Mux(sel === IMM_U, inst(30,20).asSInt, sign) val b19_12 = Mux(sel =/= IMM_U && sel =/= IMM_UJ, sign, inst(19,12).asSInt) val b11 = Mux(sel === IMM_U || sel === IMM_Z, 0.S, Mux(sel === IMM_UJ, inst(20).asSInt, Mux(sel === IMM_SB, inst(7).asSInt, sign))) val b10_5 = Mux(sel === IMM_U || sel === IMM_Z, 0.U, inst(30,25)) val b4_1 = Mux(sel === IMM_U, 0.U, Mux(sel === IMM_S || sel === IMM_SB, inst(11,8), Mux(sel === IMM_Z, inst(19,16), inst(24,21)))) val b0 = Mux(sel === IMM_S, inst(7), Mux(sel === IMM_I, inst(20), Mux(sel === IMM_Z, inst(15), 0.U))) Cat(sign, b30_20, b19_12, b11, b10_5, b4_1, b0).asSInt } }
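// Worked example for ImmGen: with sel === IMM_I, sign = inst(31), b10_5 = inst(30,25),
// b4_1 = inst(24,21) and b0 = inst(20), while b30_20, b19_12 and b11 all reduce to the sign
// bit, so the concatenation is the familiar sign-extended inst(31,20) I-type immediate.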
module Rocket( // @[RocketCore.scala:153:7] input clock, // @[RocketCore.scala:153:7] input reset, // @[RocketCore.scala:153:7] input [3:0] io_hartid, // @[RocketCore.scala:134:14] input io_interrupts_debug, // @[RocketCore.scala:134:14] input io_interrupts_mtip, // @[RocketCore.scala:134:14] input io_interrupts_msip, // @[RocketCore.scala:134:14] input io_interrupts_meip, // @[RocketCore.scala:134:14] input io_interrupts_seip, // @[RocketCore.scala:134:14] output io_imem_might_request, // @[RocketCore.scala:134:14] output io_imem_req_valid, // @[RocketCore.scala:134:14] output [39:0] io_imem_req_bits_pc, // @[RocketCore.scala:134:14] output io_imem_req_bits_speculative, // @[RocketCore.scala:134:14] output io_imem_sfence_valid, // @[RocketCore.scala:134:14] output io_imem_sfence_bits_rs1, // @[RocketCore.scala:134:14] output io_imem_sfence_bits_rs2, // @[RocketCore.scala:134:14] output [38:0] io_imem_sfence_bits_addr, // @[RocketCore.scala:134:14] output io_imem_resp_ready, // @[RocketCore.scala:134:14] input io_imem_resp_valid, // @[RocketCore.scala:134:14] input io_imem_resp_bits_btb_taken, // @[RocketCore.scala:134:14] input io_imem_resp_bits_btb_bridx, // @[RocketCore.scala:134:14] input [4:0] io_imem_resp_bits_btb_entry, // @[RocketCore.scala:134:14] input [7:0] io_imem_resp_bits_btb_bht_history, // @[RocketCore.scala:134:14] input [39:0] io_imem_resp_bits_pc, // @[RocketCore.scala:134:14] input [31:0] io_imem_resp_bits_data, // @[RocketCore.scala:134:14] input io_imem_resp_bits_xcpt_pf_inst, // @[RocketCore.scala:134:14] input io_imem_resp_bits_xcpt_gf_inst, // @[RocketCore.scala:134:14] input io_imem_resp_bits_xcpt_ae_inst, // @[RocketCore.scala:134:14] input io_imem_resp_bits_replay, // @[RocketCore.scala:134:14] output io_imem_btb_update_valid, // @[RocketCore.scala:134:14] output [4:0] io_imem_btb_update_bits_prediction_entry, // @[RocketCore.scala:134:14] output [38:0] io_imem_btb_update_bits_pc, // @[RocketCore.scala:134:14] output io_imem_btb_update_bits_isValid, // @[RocketCore.scala:134:14] output [38:0] io_imem_btb_update_bits_br_pc, // @[RocketCore.scala:134:14] output [1:0] io_imem_btb_update_bits_cfiType, // @[RocketCore.scala:134:14] output io_imem_bht_update_valid, // @[RocketCore.scala:134:14] output [7:0] io_imem_bht_update_bits_prediction_history, // @[RocketCore.scala:134:14] output [38:0] io_imem_bht_update_bits_pc, // @[RocketCore.scala:134:14] output io_imem_bht_update_bits_branch, // @[RocketCore.scala:134:14] output io_imem_bht_update_bits_taken, // @[RocketCore.scala:134:14] output io_imem_bht_update_bits_mispredict, // @[RocketCore.scala:134:14] output io_imem_flush_icache, // @[RocketCore.scala:134:14] output io_imem_progress, // @[RocketCore.scala:134:14] input io_dmem_req_ready, // @[RocketCore.scala:134:14] output io_dmem_req_valid, // @[RocketCore.scala:134:14] output [39:0] io_dmem_req_bits_addr, // @[RocketCore.scala:134:14] output [6:0] io_dmem_req_bits_tag, // @[RocketCore.scala:134:14] output [4:0] io_dmem_req_bits_cmd, // @[RocketCore.scala:134:14] output [1:0] io_dmem_req_bits_size, // @[RocketCore.scala:134:14] output io_dmem_req_bits_signed, // @[RocketCore.scala:134:14] output [1:0] io_dmem_req_bits_dprv, // @[RocketCore.scala:134:14] output io_dmem_req_bits_dv, // @[RocketCore.scala:134:14] output io_dmem_s1_kill, // @[RocketCore.scala:134:14] output [63:0] io_dmem_s1_data_data, // @[RocketCore.scala:134:14] input io_dmem_s2_nack, // @[RocketCore.scala:134:14] input io_dmem_resp_valid, // @[RocketCore.scala:134:14] input [6:0] 
io_dmem_resp_bits_tag, // @[RocketCore.scala:134:14] input [1:0] io_dmem_resp_bits_size, // @[RocketCore.scala:134:14] input [63:0] io_dmem_resp_bits_data, // @[RocketCore.scala:134:14] input io_dmem_resp_bits_replay, // @[RocketCore.scala:134:14] input io_dmem_resp_bits_has_data, // @[RocketCore.scala:134:14] input [63:0] io_dmem_resp_bits_data_word_bypass, // @[RocketCore.scala:134:14] input io_dmem_replay_next, // @[RocketCore.scala:134:14] input io_dmem_s2_xcpt_ma_ld, // @[RocketCore.scala:134:14] input io_dmem_s2_xcpt_ma_st, // @[RocketCore.scala:134:14] input io_dmem_s2_xcpt_pf_ld, // @[RocketCore.scala:134:14] input io_dmem_s2_xcpt_pf_st, // @[RocketCore.scala:134:14] input io_dmem_s2_xcpt_ae_ld, // @[RocketCore.scala:134:14] input io_dmem_s2_xcpt_ae_st, // @[RocketCore.scala:134:14] input io_dmem_ordered, // @[RocketCore.scala:134:14] input io_dmem_perf_release, // @[RocketCore.scala:134:14] input io_dmem_perf_grant, // @[RocketCore.scala:134:14] output [3:0] io_ptw_ptbr_mode, // @[RocketCore.scala:134:14] output [43:0] io_ptw_ptbr_ppn, // @[RocketCore.scala:134:14] output io_ptw_sfence_valid, // @[RocketCore.scala:134:14] output io_ptw_sfence_bits_rs1, // @[RocketCore.scala:134:14] output io_ptw_status_debug, // @[RocketCore.scala:134:14] output [1:0] io_ptw_status_prv, // @[RocketCore.scala:134:14] output io_ptw_status_mxr, // @[RocketCore.scala:134:14] output io_ptw_status_sum, // @[RocketCore.scala:134:14] output io_ptw_pmp_0_cfg_l, // @[RocketCore.scala:134:14] output [1:0] io_ptw_pmp_0_cfg_a, // @[RocketCore.scala:134:14] output io_ptw_pmp_0_cfg_x, // @[RocketCore.scala:134:14] output io_ptw_pmp_0_cfg_w, // @[RocketCore.scala:134:14] output io_ptw_pmp_0_cfg_r, // @[RocketCore.scala:134:14] output [29:0] io_ptw_pmp_0_addr, // @[RocketCore.scala:134:14] output [31:0] io_ptw_pmp_0_mask, // @[RocketCore.scala:134:14] output io_ptw_pmp_1_cfg_l, // @[RocketCore.scala:134:14] output [1:0] io_ptw_pmp_1_cfg_a, // @[RocketCore.scala:134:14] output io_ptw_pmp_1_cfg_x, // @[RocketCore.scala:134:14] output io_ptw_pmp_1_cfg_w, // @[RocketCore.scala:134:14] output io_ptw_pmp_1_cfg_r, // @[RocketCore.scala:134:14] output [29:0] io_ptw_pmp_1_addr, // @[RocketCore.scala:134:14] output [31:0] io_ptw_pmp_1_mask, // @[RocketCore.scala:134:14] output io_ptw_pmp_2_cfg_l, // @[RocketCore.scala:134:14] output [1:0] io_ptw_pmp_2_cfg_a, // @[RocketCore.scala:134:14] output io_ptw_pmp_2_cfg_x, // @[RocketCore.scala:134:14] output io_ptw_pmp_2_cfg_w, // @[RocketCore.scala:134:14] output io_ptw_pmp_2_cfg_r, // @[RocketCore.scala:134:14] output [29:0] io_ptw_pmp_2_addr, // @[RocketCore.scala:134:14] output [31:0] io_ptw_pmp_2_mask, // @[RocketCore.scala:134:14] output io_ptw_pmp_3_cfg_l, // @[RocketCore.scala:134:14] output [1:0] io_ptw_pmp_3_cfg_a, // @[RocketCore.scala:134:14] output io_ptw_pmp_3_cfg_x, // @[RocketCore.scala:134:14] output io_ptw_pmp_3_cfg_w, // @[RocketCore.scala:134:14] output io_ptw_pmp_3_cfg_r, // @[RocketCore.scala:134:14] output [29:0] io_ptw_pmp_3_addr, // @[RocketCore.scala:134:14] output [31:0] io_ptw_pmp_3_mask, // @[RocketCore.scala:134:14] output io_ptw_pmp_4_cfg_l, // @[RocketCore.scala:134:14] output [1:0] io_ptw_pmp_4_cfg_a, // @[RocketCore.scala:134:14] output io_ptw_pmp_4_cfg_x, // @[RocketCore.scala:134:14] output io_ptw_pmp_4_cfg_w, // @[RocketCore.scala:134:14] output io_ptw_pmp_4_cfg_r, // @[RocketCore.scala:134:14] output [29:0] io_ptw_pmp_4_addr, // @[RocketCore.scala:134:14] output [31:0] io_ptw_pmp_4_mask, // @[RocketCore.scala:134:14] output io_ptw_pmp_5_cfg_l, 
// @[RocketCore.scala:134:14] output [1:0] io_ptw_pmp_5_cfg_a, // @[RocketCore.scala:134:14] output io_ptw_pmp_5_cfg_x, // @[RocketCore.scala:134:14] output io_ptw_pmp_5_cfg_w, // @[RocketCore.scala:134:14] output io_ptw_pmp_5_cfg_r, // @[RocketCore.scala:134:14] output [29:0] io_ptw_pmp_5_addr, // @[RocketCore.scala:134:14] output [31:0] io_ptw_pmp_5_mask, // @[RocketCore.scala:134:14] output io_ptw_pmp_6_cfg_l, // @[RocketCore.scala:134:14] output [1:0] io_ptw_pmp_6_cfg_a, // @[RocketCore.scala:134:14] output io_ptw_pmp_6_cfg_x, // @[RocketCore.scala:134:14] output io_ptw_pmp_6_cfg_w, // @[RocketCore.scala:134:14] output io_ptw_pmp_6_cfg_r, // @[RocketCore.scala:134:14] output [29:0] io_ptw_pmp_6_addr, // @[RocketCore.scala:134:14] output [31:0] io_ptw_pmp_6_mask, // @[RocketCore.scala:134:14] output io_ptw_pmp_7_cfg_l, // @[RocketCore.scala:134:14] output [1:0] io_ptw_pmp_7_cfg_a, // @[RocketCore.scala:134:14] output io_ptw_pmp_7_cfg_x, // @[RocketCore.scala:134:14] output io_ptw_pmp_7_cfg_w, // @[RocketCore.scala:134:14] output io_ptw_pmp_7_cfg_r, // @[RocketCore.scala:134:14] output [29:0] io_ptw_pmp_7_addr, // @[RocketCore.scala:134:14] output [31:0] io_ptw_pmp_7_mask, // @[RocketCore.scala:134:14] output [63:0] io_ptw_customCSRs_csrs_0_value, // @[RocketCore.scala:134:14] output [31:0] io_fpu_inst, // @[RocketCore.scala:134:14] output [63:0] io_fpu_fromint_data, // @[RocketCore.scala:134:14] output [2:0] io_fpu_fcsr_rm, // @[RocketCore.scala:134:14] input io_fpu_fcsr_flags_valid, // @[RocketCore.scala:134:14] input [4:0] io_fpu_fcsr_flags_bits, // @[RocketCore.scala:134:14] input [63:0] io_fpu_store_data, // @[RocketCore.scala:134:14] input [63:0] io_fpu_toint_data, // @[RocketCore.scala:134:14] output io_fpu_ll_resp_val, // @[RocketCore.scala:134:14] output [2:0] io_fpu_ll_resp_type, // @[RocketCore.scala:134:14] output [4:0] io_fpu_ll_resp_tag, // @[RocketCore.scala:134:14] output [63:0] io_fpu_ll_resp_data, // @[RocketCore.scala:134:14] output io_fpu_valid, // @[RocketCore.scala:134:14] input io_fpu_fcsr_rdy, // @[RocketCore.scala:134:14] input io_fpu_nack_mem, // @[RocketCore.scala:134:14] input io_fpu_illegal_rm, // @[RocketCore.scala:134:14] output io_fpu_killx, // @[RocketCore.scala:134:14] output io_fpu_killm, // @[RocketCore.scala:134:14] input io_fpu_dec_wen, // @[RocketCore.scala:134:14] input io_fpu_dec_ren1, // @[RocketCore.scala:134:14] input io_fpu_dec_ren2, // @[RocketCore.scala:134:14] input io_fpu_dec_ren3, // @[RocketCore.scala:134:14] input io_fpu_sboard_set, // @[RocketCore.scala:134:14] input io_fpu_sboard_clr, // @[RocketCore.scala:134:14] input [4:0] io_fpu_sboard_clra, // @[RocketCore.scala:134:14] output io_wfi // @[RocketCore.scala:134:14] ); wire io_dmem_req_valid_0; // @[RocketCore.scala:1130:41] wire ll_arb_io_out_ready; // @[RocketCore.scala:782:23, :809:44, :810:25] wire take_pc_wb; // @[RocketCore.scala:762:{27,38,53}] wire take_pc_mem; // @[RocketCore.scala:629:{32,49}] wire _ll_arb_io_in_0_ready; // @[RocketCore.scala:776:22] wire _ll_arb_io_out_valid; // @[RocketCore.scala:776:22] wire [63:0] _ll_arb_io_out_bits_data; // @[RocketCore.scala:776:22] wire [4:0] _ll_arb_io_out_bits_tag; // @[RocketCore.scala:776:22] wire _div_io_req_ready; // @[RocketCore.scala:511:19] wire _div_io_resp_valid; // @[RocketCore.scala:511:19] wire [63:0] _div_io_resp_bits_data; // @[RocketCore.scala:511:19] wire [4:0] _div_io_resp_bits_tag; // @[RocketCore.scala:511:19] wire [63:0] _alu_io_out; // @[RocketCore.scala:504:19] wire [63:0] _alu_io_adder_out; // 
@[RocketCore.scala:504:19] wire _alu_io_cmp_out; // @[RocketCore.scala:504:19] wire _bpu_io_xcpt_if; // @[RocketCore.scala:414:19] wire _bpu_io_xcpt_ld; // @[RocketCore.scala:414:19] wire _bpu_io_xcpt_st; // @[RocketCore.scala:414:19] wire _bpu_io_debug_if; // @[RocketCore.scala:414:19] wire _bpu_io_debug_ld; // @[RocketCore.scala:414:19] wire _bpu_io_debug_st; // @[RocketCore.scala:414:19] wire [63:0] _csr_io_rw_rdata; // @[RocketCore.scala:341:19] wire _csr_io_decode_0_fp_illegal; // @[RocketCore.scala:341:19] wire _csr_io_decode_0_fp_csr; // @[RocketCore.scala:341:19] wire _csr_io_decode_0_read_illegal; // @[RocketCore.scala:341:19] wire _csr_io_decode_0_write_illegal; // @[RocketCore.scala:341:19] wire _csr_io_decode_0_write_flush; // @[RocketCore.scala:341:19] wire _csr_io_decode_0_system_illegal; // @[RocketCore.scala:341:19] wire _csr_io_decode_0_virtual_access_illegal; // @[RocketCore.scala:341:19] wire _csr_io_decode_0_virtual_system_illegal; // @[RocketCore.scala:341:19] wire _csr_io_csr_stall; // @[RocketCore.scala:341:19] wire _csr_io_eret; // @[RocketCore.scala:341:19] wire _csr_io_singleStep; // @[RocketCore.scala:341:19] wire _csr_io_status_debug; // @[RocketCore.scala:341:19] wire [31:0] _csr_io_status_isa; // @[RocketCore.scala:341:19] wire _csr_io_status_dv; // @[RocketCore.scala:341:19] wire [1:0] _csr_io_status_prv; // @[RocketCore.scala:341:19] wire _csr_io_status_v; // @[RocketCore.scala:341:19] wire [39:0] _csr_io_evec; // @[RocketCore.scala:341:19] wire [63:0] _csr_io_time; // @[RocketCore.scala:341:19] wire _csr_io_interrupt; // @[RocketCore.scala:341:19] wire [63:0] _csr_io_interrupt_cause; // @[RocketCore.scala:341:19] wire _csr_io_bp_0_control_action; // @[RocketCore.scala:341:19] wire [1:0] _csr_io_bp_0_control_tmatch; // @[RocketCore.scala:341:19] wire _csr_io_bp_0_control_m; // @[RocketCore.scala:341:19] wire _csr_io_bp_0_control_s; // @[RocketCore.scala:341:19] wire _csr_io_bp_0_control_u; // @[RocketCore.scala:341:19] wire _csr_io_bp_0_control_x; // @[RocketCore.scala:341:19] wire _csr_io_bp_0_control_w; // @[RocketCore.scala:341:19] wire _csr_io_bp_0_control_r; // @[RocketCore.scala:341:19] wire [38:0] _csr_io_bp_0_address; // @[RocketCore.scala:341:19] wire _csr_io_inhibit_cycle; // @[RocketCore.scala:341:19] wire _csr_io_trace_0_valid; // @[RocketCore.scala:341:19] wire [39:0] _csr_io_trace_0_iaddr; // @[RocketCore.scala:341:19] wire [31:0] _csr_io_trace_0_insn; // @[RocketCore.scala:341:19] wire _csr_io_trace_0_exception; // @[RocketCore.scala:341:19] wire [63:0] _csr_io_customCSRs_0_value; // @[RocketCore.scala:341:19] wire [63:0] _rf_ext_R0_data; // @[RocketCore.scala:1319:15] wire [63:0] _rf_ext_R1_data; // @[RocketCore.scala:1319:15] wire [39:0] _ibuf_io_pc; // @[RocketCore.scala:311:20] wire [4:0] _ibuf_io_btb_resp_entry; // @[RocketCore.scala:311:20] wire [7:0] _ibuf_io_btb_resp_bht_history; // @[RocketCore.scala:311:20] wire _ibuf_io_inst_0_valid; // @[RocketCore.scala:311:20] wire _ibuf_io_inst_0_bits_xcpt0_pf_inst; // @[RocketCore.scala:311:20] wire _ibuf_io_inst_0_bits_xcpt0_gf_inst; // @[RocketCore.scala:311:20] wire _ibuf_io_inst_0_bits_xcpt0_ae_inst; // @[RocketCore.scala:311:20] wire _ibuf_io_inst_0_bits_xcpt1_pf_inst; // @[RocketCore.scala:311:20] wire _ibuf_io_inst_0_bits_xcpt1_gf_inst; // @[RocketCore.scala:311:20] wire _ibuf_io_inst_0_bits_xcpt1_ae_inst; // @[RocketCore.scala:311:20] wire _ibuf_io_inst_0_bits_replay; // @[RocketCore.scala:311:20] wire _ibuf_io_inst_0_bits_rvc; // @[RocketCore.scala:311:20] wire [31:0] 
_ibuf_io_inst_0_bits_inst_bits; // @[RocketCore.scala:311:20] wire [4:0] _ibuf_io_inst_0_bits_inst_rd; // @[RocketCore.scala:311:20] wire [4:0] _ibuf_io_inst_0_bits_inst_rs1; // @[RocketCore.scala:311:20] wire [4:0] _ibuf_io_inst_0_bits_inst_rs2; // @[RocketCore.scala:311:20] wire [4:0] _ibuf_io_inst_0_bits_inst_rs3; // @[RocketCore.scala:311:20] wire [31:0] _ibuf_io_inst_0_bits_raw; // @[RocketCore.scala:311:20] reg id_reg_pause; // @[RocketCore.scala:161:25] reg imem_might_request_reg; // @[RocketCore.scala:162:35] reg ex_ctrl_fp; // @[RocketCore.scala:243:20] reg ex_ctrl_rocc; // @[RocketCore.scala:243:20] reg ex_ctrl_branch; // @[RocketCore.scala:243:20] reg ex_ctrl_jal; // @[RocketCore.scala:243:20] reg ex_ctrl_jalr; // @[RocketCore.scala:243:20] reg ex_ctrl_rxs2; // @[RocketCore.scala:243:20] reg ex_ctrl_rxs1; // @[RocketCore.scala:243:20] reg [2:0] ex_ctrl_sel_alu2; // @[RocketCore.scala:243:20] reg [1:0] ex_ctrl_sel_alu1; // @[RocketCore.scala:243:20] reg [2:0] ex_ctrl_sel_imm; // @[RocketCore.scala:243:20] reg ex_ctrl_alu_dw; // @[RocketCore.scala:243:20] reg [4:0] ex_ctrl_alu_fn; // @[RocketCore.scala:243:20] reg ex_ctrl_mem; // @[RocketCore.scala:243:20] reg [4:0] ex_ctrl_mem_cmd; // @[RocketCore.scala:243:20] reg ex_ctrl_rfs1; // @[RocketCore.scala:243:20] reg ex_ctrl_rfs2; // @[RocketCore.scala:243:20] reg ex_ctrl_wfd; // @[RocketCore.scala:243:20] reg ex_ctrl_mul; // @[RocketCore.scala:243:20] reg ex_ctrl_div; // @[RocketCore.scala:243:20] reg ex_ctrl_wxd; // @[RocketCore.scala:243:20] reg [2:0] ex_ctrl_csr; // @[RocketCore.scala:243:20] reg ex_ctrl_fence_i; // @[RocketCore.scala:243:20] reg mem_ctrl_fp; // @[RocketCore.scala:244:21] reg mem_ctrl_rocc; // @[RocketCore.scala:244:21] reg mem_ctrl_branch; // @[RocketCore.scala:244:21] reg mem_ctrl_jal; // @[RocketCore.scala:244:21] reg mem_ctrl_jalr; // @[RocketCore.scala:244:21] reg mem_ctrl_rxs2; // @[RocketCore.scala:244:21] reg mem_ctrl_rxs1; // @[RocketCore.scala:244:21] reg mem_ctrl_mem; // @[RocketCore.scala:244:21] reg mem_ctrl_rfs1; // @[RocketCore.scala:244:21] reg mem_ctrl_rfs2; // @[RocketCore.scala:244:21] reg mem_ctrl_wfd; // @[RocketCore.scala:244:21] reg mem_ctrl_mul; // @[RocketCore.scala:244:21] reg mem_ctrl_div; // @[RocketCore.scala:244:21] reg mem_ctrl_wxd; // @[RocketCore.scala:244:21] reg [2:0] mem_ctrl_csr; // @[RocketCore.scala:244:21] reg mem_ctrl_fence_i; // @[RocketCore.scala:244:21] reg mem_ctrl_vec; // @[RocketCore.scala:244:21] reg wb_ctrl_rocc; // @[RocketCore.scala:245:20] reg wb_ctrl_rxs2; // @[RocketCore.scala:245:20] reg wb_ctrl_rxs1; // @[RocketCore.scala:245:20] reg wb_ctrl_mem; // @[RocketCore.scala:245:20] reg wb_ctrl_rfs1; // @[RocketCore.scala:245:20] reg wb_ctrl_rfs2; // @[RocketCore.scala:245:20] reg wb_ctrl_wfd; // @[RocketCore.scala:245:20] reg wb_ctrl_div; // @[RocketCore.scala:245:20] reg wb_ctrl_wxd; // @[RocketCore.scala:245:20] reg [2:0] wb_ctrl_csr; // @[RocketCore.scala:245:20] reg wb_ctrl_fence_i; // @[RocketCore.scala:245:20] reg wb_ctrl_vec; // @[RocketCore.scala:245:20] reg ex_reg_xcpt_interrupt; // @[RocketCore.scala:247:35] reg ex_reg_valid; // @[RocketCore.scala:248:35] reg ex_reg_rvc; // @[RocketCore.scala:249:35] reg [4:0] ex_reg_btb_resp_entry; // @[RocketCore.scala:250:35] reg [7:0] ex_reg_btb_resp_bht_history; // @[RocketCore.scala:250:35] reg ex_reg_xcpt; // @[RocketCore.scala:251:35] reg ex_reg_flush_pipe; // @[RocketCore.scala:252:35] reg ex_reg_load_use; // @[RocketCore.scala:253:35] reg [63:0] ex_reg_cause; // @[RocketCore.scala:254:35] reg ex_reg_replay; // 
@[RocketCore.scala:255:26] reg [39:0] ex_reg_pc; // @[RocketCore.scala:256:22] reg [1:0] ex_reg_mem_size; // @[RocketCore.scala:257:28] reg [31:0] ex_reg_inst; // @[RocketCore.scala:259:24] reg [31:0] ex_reg_raw_inst; // @[RocketCore.scala:260:28] reg mem_reg_xcpt_interrupt; // @[RocketCore.scala:264:36] reg mem_reg_valid; // @[RocketCore.scala:265:36] reg mem_reg_rvc; // @[RocketCore.scala:266:36] reg [4:0] mem_reg_btb_resp_entry; // @[RocketCore.scala:267:36] reg [7:0] mem_reg_btb_resp_bht_history; // @[RocketCore.scala:267:36] reg mem_reg_xcpt; // @[RocketCore.scala:268:36] reg mem_reg_replay; // @[RocketCore.scala:269:36] reg mem_reg_flush_pipe; // @[RocketCore.scala:270:36] reg [63:0] mem_reg_cause; // @[RocketCore.scala:271:36] reg mem_mem_cmd_bh; // @[RocketCore.scala:272:36] reg mem_reg_load; // @[RocketCore.scala:273:36] reg mem_reg_store; // @[RocketCore.scala:274:36] reg mem_reg_sfence; // @[RocketCore.scala:276:27] reg [39:0] mem_reg_pc; // @[RocketCore.scala:277:23] reg [31:0] mem_reg_inst; // @[RocketCore.scala:278:25] reg [1:0] mem_reg_mem_size; // @[RocketCore.scala:279:29] reg mem_reg_hls_or_dv; // @[RocketCore.scala:280:30] reg [31:0] mem_reg_raw_inst; // @[RocketCore.scala:281:29] reg [63:0] mem_reg_wdata; // @[RocketCore.scala:282:26] reg [63:0] mem_reg_rs2; // @[RocketCore.scala:283:24] reg mem_br_taken; // @[RocketCore.scala:284:25] reg wb_reg_valid; // @[RocketCore.scala:288:35] reg wb_reg_xcpt; // @[RocketCore.scala:289:35] reg wb_reg_replay; // @[RocketCore.scala:290:35] reg wb_reg_flush_pipe; // @[RocketCore.scala:291:35] reg [63:0] wb_reg_cause; // @[RocketCore.scala:292:35] reg wb_reg_sfence; // @[RocketCore.scala:294:26] reg [39:0] wb_reg_pc; // @[RocketCore.scala:295:22] reg [1:0] wb_reg_mem_size; // @[RocketCore.scala:296:28] reg wb_reg_hls_or_dv; // @[RocketCore.scala:297:29] reg [31:0] wb_reg_inst; // @[RocketCore.scala:300:24] reg [31:0] wb_reg_raw_inst; // @[RocketCore.scala:301:28] reg [63:0] wb_reg_wdata; // @[RocketCore.scala:302:25] wire ibuf_io_kill = take_pc_wb | take_pc_mem; // @[RocketCore.scala:307:35, :629:{32,49}, :762:{27,38,53}] wire [29:0] id_ctrl_decoder_decoded_invInputs = ~(_ibuf_io_inst_0_bits_inst_bits[31:2]); // @[pla.scala:78:21] wire [6:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[10]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [7:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_2 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [6:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_3 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[12]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [8:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_6 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], 
_ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [5:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_7 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], _ibuf_io_inst_0_bits_inst_bits[2], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[4]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [7:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_9 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[12]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_12 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [12:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_13 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_14 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_16 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[3], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], 
id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [4:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_17 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[2], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [10:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_20 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [10:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_21 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [8:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_24 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [9:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_25 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], _ibuf_io_inst_0_bits_inst_bits[2], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [6:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_26 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], _ibuf_io_inst_0_bits_inst_bits[2], _ibuf_io_inst_0_bits_inst_bits[3], id_ctrl_decoder_decoded_invInputs[2], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [6:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_29 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[12]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [9:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_33 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], _ibuf_io_inst_0_bits_inst_bits[2], _ibuf_io_inst_0_bits_inst_bits[3], id_ctrl_decoder_decoded_invInputs[2], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12]}; // @[pla.scala:78:21, :90:45, 
:91:29, :98:53] wire [13:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_34 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_36 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[3], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [8:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_42 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [7:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_43 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], _ibuf_io_inst_0_bits_inst_bits[12]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [6:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_44 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[13], id_ctrl_decoder_decoded_invInputs[12]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [7:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_48 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[13]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [10:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_52 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], _ibuf_io_inst_0_bits_inst_bits[2], _ibuf_io_inst_0_bits_inst_bits[3], id_ctrl_decoder_decoded_invInputs[2], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[13], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [7:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_55 = {_ibuf_io_inst_0_bits_inst_bits[0], 
_ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], _ibuf_io_inst_0_bits_inst_bits[13]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [8:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_56 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], _ibuf_io_inst_0_bits_inst_bits[13], id_ctrl_decoder_decoded_invInputs[12]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [13:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_61 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[14], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [7:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_63 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], _ibuf_io_inst_0_bits_inst_bits[14]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [13:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_84 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[14], _ibuf_io_inst_0_bits_inst_bits[25], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [13:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_85 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[14], _ibuf_io_inst_0_bits_inst_bits[25], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_89 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[3], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[24], _ibuf_io_inst_0_bits_inst_bits[27], 
id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [12:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_90 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], _ibuf_io_inst_0_bits_inst_bits[2], _ibuf_io_inst_0_bits_inst_bits[3], id_ctrl_decoder_decoded_invInputs[2], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[13], id_ctrl_decoder_decoded_invInputs[12], _ibuf_io_inst_0_bits_inst_bits[27], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [21:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_91 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[3], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], _ibuf_io_inst_0_bits_inst_bits[14], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], _ibuf_io_inst_0_bits_inst_bits[27], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_95 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], _ibuf_io_inst_0_bits_inst_bits[2], _ibuf_io_inst_0_bits_inst_bits[3], id_ctrl_decoder_decoded_invInputs[2], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[25], _ibuf_io_inst_0_bits_inst_bits[28], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [18:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_96 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], _ibuf_io_inst_0_bits_inst_bits[2], _ibuf_io_inst_0_bits_inst_bits[3], id_ctrl_decoder_decoded_invInputs[2], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[13], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[25], _ibuf_io_inst_0_bits_inst_bits[28], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_101 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], 
_ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[5], id_ctrl_decoder_decoded_invInputs[6], id_ctrl_decoder_decoded_invInputs[7], _ibuf_io_inst_0_bits_inst_bits[25], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], _ibuf_io_inst_0_bits_inst_bits[28], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_102 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[5], id_ctrl_decoder_decoded_invInputs[6], id_ctrl_decoder_decoded_invInputs[7], id_ctrl_decoder_decoded_invInputs[12], _ibuf_io_inst_0_bits_inst_bits[25], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], _ibuf_io_inst_0_bits_inst_bits[28], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [21:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_104 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[5], id_ctrl_decoder_decoded_invInputs[6], id_ctrl_decoder_decoded_invInputs[7], id_ctrl_decoder_decoded_invInputs[8], id_ctrl_decoder_decoded_invInputs[9], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], _ibuf_io_inst_0_bits_inst_bits[25], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], _ibuf_io_inst_0_bits_inst_bits[28], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [13:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_106 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [13:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_107 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [13:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_108 = 
{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [13:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_109 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [13:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_110 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [13:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_111 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_115 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[10], _ibuf_io_inst_0_bits_inst_bits[13], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_116 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[10], _ibuf_io_inst_0_bits_inst_bits[14], id_ctrl_decoder_decoded_invInputs[23], 
id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_120 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[24], _ibuf_io_inst_0_bits_inst_bits[27], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_122 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], _ibuf_io_inst_0_bits_inst_bits[27], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [21:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_124 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], _ibuf_io_inst_0_bits_inst_bits[14], _ibuf_io_inst_0_bits_inst_bits[20], _ibuf_io_inst_0_bits_inst_bits[21], _ibuf_io_inst_0_bits_inst_bits[22], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], _ibuf_io_inst_0_bits_inst_bits[27], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_130 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], _ibuf_io_inst_0_bits_inst_bits[14], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[30], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_133 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[3], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[4], 
_ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], _ibuf_io_inst_0_bits_inst_bits[14], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[30], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_140 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], _ibuf_io_inst_0_bits_inst_bits[20], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_142 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[18], _ibuf_io_inst_0_bits_inst_bits[21], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_144 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], _ibuf_io_inst_0_bits_inst_bits[25], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_146 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], _ibuf_io_inst_0_bits_inst_bits[26], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_149 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], 
id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[24], _ibuf_io_inst_0_bits_inst_bits[27], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_152 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], _ibuf_io_inst_0_bits_inst_bits[27], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_154 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], _ibuf_io_inst_0_bits_inst_bits[27], _ibuf_io_inst_0_bits_inst_bits[28], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_155 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[24], _ibuf_io_inst_0_bits_inst_bits[27], _ibuf_io_inst_0_bits_inst_bits[28], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_157 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], _ibuf_io_inst_0_bits_inst_bits[30], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] 
_id_ctrl_decoder_decoded_andMatrixOutputs_T_158 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], _ibuf_io_inst_0_bits_inst_bits[30], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_159 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], _ibuf_io_inst_0_bits_inst_bits[30], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_164 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], _ibuf_io_inst_0_bits_inst_bits[30], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [21:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_169 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], _ibuf_io_inst_0_bits_inst_bits[14], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], _ibuf_io_inst_0_bits_inst_bits[23], _ibuf_io_inst_0_bits_inst_bits[24], _ibuf_io_inst_0_bits_inst_bits[25], id_ctrl_decoder_decoded_invInputs[24], _ibuf_io_inst_0_bits_inst_bits[27], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], _ibuf_io_inst_0_bits_inst_bits[30], id_ctrl_decoder_decoded_invInputs[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_179 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], 
id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30], _ibuf_io_inst_0_bits_inst_bits[31]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_180 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30], _ibuf_io_inst_0_bits_inst_bits[31]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_181 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30], _ibuf_io_inst_0_bits_inst_bits[31]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_182 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30], _ibuf_io_inst_0_bits_inst_bits[31]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_184 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[25], _ibuf_io_inst_0_bits_inst_bits[28], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30], _ibuf_io_inst_0_bits_inst_bits[31]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_185 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], 
_ibuf_io_inst_0_bits_inst_bits[28], id_ctrl_decoder_decoded_invInputs[27], _ibuf_io_inst_0_bits_inst_bits[30], _ibuf_io_inst_0_bits_inst_bits[31]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [19:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_188 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], _ibuf_io_inst_0_bits_inst_bits[30], _ibuf_io_inst_0_bits_inst_bits[31]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [19:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_189 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], _ibuf_io_inst_0_bits_inst_bits[30], _ibuf_io_inst_0_bits_inst_bits[31]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [20:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_192 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[25], _ibuf_io_inst_0_bits_inst_bits[28], _ibuf_io_inst_0_bits_inst_bits[29], _ibuf_io_inst_0_bits_inst_bits[30], _ibuf_io_inst_0_bits_inst_bits[31]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [20:0] _id_ctrl_decoder_decoded_andMatrixOutputs_T_193 = {_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], 
_ibuf_io_inst_0_bits_inst_bits[28], _ibuf_io_inst_0_bits_inst_bits[29], _ibuf_io_inst_0_bits_inst_bits[30], _ibuf_io_inst_0_bits_inst_bits[31]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [2:0] _id_ctrl_decoder_decoded_orMatrixOutputs_T_2 = {&_id_ctrl_decoder_decoded_andMatrixOutputs_T_52, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_90, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_95}; // @[pla.scala:98:{53,70}, :114:19] wire [45:0] _id_ctrl_decoder_decoded_orMatrixOutputs_T_10 = {&_id_ctrl_decoder_decoded_andMatrixOutputs_T, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_2, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[12]}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_6, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_7, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_12, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_13, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_14, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_16, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_25, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_26, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_34, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_36, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_43, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_48, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_52, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_55, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_61, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_84, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_85, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_89, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_90, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_91, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_95, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_115, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_116, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_120, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_122, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_124, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_130, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_133, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_149, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_152, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_157, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_158, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_159, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_164, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_169, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28], _ibuf_io_inst_0_bits_inst_bits[31]}, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[23], 
id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28], _ibuf_io_inst_0_bits_inst_bits[31]}, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28], _ibuf_io_inst_0_bits_inst_bits[31]}, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], _ibuf_io_inst_0_bits_inst_bits[29], id_ctrl_decoder_decoded_invInputs[28], _ibuf_io_inst_0_bits_inst_bits[31]}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_181, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_182, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_188, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_189}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:19] wire [2:0] _id_ctrl_decoder_decoded_orMatrixOutputs_T_12 = {&{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[25], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], _ibuf_io_inst_0_bits_inst_bits[25], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_85}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:19] wire [8:0] _id_ctrl_decoder_decoded_orMatrixOutputs_T_29 = {&{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], id_ctrl_decoder_decoded_invInputs[3], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[10]}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_2, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_3, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_29, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_44, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_52, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_90, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_96, 
&_id_ctrl_decoder_decoded_andMatrixOutputs_T_104}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:19] wire [43:0] _id_ctrl_decoder_decoded_orMatrixOutputs_T_59 = {&_id_ctrl_decoder_decoded_andMatrixOutputs_T, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_2, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_3, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_6, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_12, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_13, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_14, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_16, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_24, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_29, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_34, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_36, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_42, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_44, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_48, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_52, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_56, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_61, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_63, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_84, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_85, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_89, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_90, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_91, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_95, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_102, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_115, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_116, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_120, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_122, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_124, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_130, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_133, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_149, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_152, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_157, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_158, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_159, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_164, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_169, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_184, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_185, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_192, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_193}; // @[pla.scala:98:{53,70}, :114:19] wire [21:0] _id_ctrl_decoder_decoded_orMatrixOutputs_T_61 = {&_id_ctrl_decoder_decoded_andMatrixOutputs_T_9, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12]}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_12, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_13, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_14, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_16, 
&_id_ctrl_decoder_decoded_andMatrixOutputs_T_52, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_61, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_63, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[11], _ibuf_io_inst_0_bits_inst_bits[14], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[29]}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_84, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_85, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_89, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_90, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_95, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[5], id_ctrl_decoder_decoded_invInputs[6], id_ctrl_decoder_decoded_invInputs[7], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], _ibuf_io_inst_0_bits_inst_bits[25], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], _ibuf_io_inst_0_bits_inst_bits[28], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_115, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_116, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_122, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_152, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_164}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:19] wire [20:0] _id_ctrl_decoder_decoded_orMatrixOutputs_T_67 = {&_id_ctrl_decoder_decoded_andMatrixOutputs_T_17, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_20, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_21, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], _ibuf_io_inst_0_bits_inst_bits[2], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[12], id_ctrl_decoder_decoded_invInputs[12]}, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], _ibuf_io_inst_0_bits_inst_bits[2], id_ctrl_decoder_decoded_invInputs[1], id_ctrl_decoder_decoded_invInputs[2], id_ctrl_decoder_decoded_invInputs[4], _ibuf_io_inst_0_bits_inst_bits[13], id_ctrl_decoder_decoded_invInputs[12]}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_106, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_107, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_108, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_109, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_110, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_111, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_140, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_142, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_144, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_146, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_154, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_155, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], 
id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[25], _ibuf_io_inst_0_bits_inst_bits[30], _ibuf_io_inst_0_bits_inst_bits[31]}, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[1], id_ctrl_decoder_decoded_invInputs[0], id_ctrl_decoder_decoded_invInputs[1], _ibuf_io_inst_0_bits_inst_bits[4], id_ctrl_decoder_decoded_invInputs[3], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[18], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], _ibuf_io_inst_0_bits_inst_bits[30], _ibuf_io_inst_0_bits_inst_bits[31]}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_179, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_180}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:19] wire [2:0] id_ctrl_csr = {|{&{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[5], id_ctrl_decoder_decoded_invInputs[6], id_ctrl_decoder_decoded_invInputs[7], id_ctrl_decoder_decoded_invInputs[8], id_ctrl_decoder_decoded_invInputs[9], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[13], id_ctrl_decoder_decoded_invInputs[14], id_ctrl_decoder_decoded_invInputs[15], id_ctrl_decoder_decoded_invInputs[16], id_ctrl_decoder_decoded_invInputs[17], id_ctrl_decoder_decoded_invInputs[19], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], id_ctrl_decoder_decoded_invInputs[26], id_ctrl_decoder_decoded_invInputs[27], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_43, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_55, &{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[5], id_ctrl_decoder_decoded_invInputs[6], id_ctrl_decoder_decoded_invInputs[7], id_ctrl_decoder_decoded_invInputs[8], id_ctrl_decoder_decoded_invInputs[9], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[13], id_ctrl_decoder_decoded_invInputs[14], id_ctrl_decoder_decoded_invInputs[15], id_ctrl_decoder_decoded_invInputs[16], id_ctrl_decoder_decoded_invInputs[17], id_ctrl_decoder_decoded_invInputs[18], _ibuf_io_inst_0_bits_inst_bits[21], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], _ibuf_io_inst_0_bits_inst_bits[28], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}, 
&{_ibuf_io_inst_0_bits_inst_bits[0], _ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[5], id_ctrl_decoder_decoded_invInputs[6], id_ctrl_decoder_decoded_invInputs[7], id_ctrl_decoder_decoded_invInputs[8], id_ctrl_decoder_decoded_invInputs[9], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[13], id_ctrl_decoder_decoded_invInputs[14], id_ctrl_decoder_decoded_invInputs[15], id_ctrl_decoder_decoded_invInputs[16], id_ctrl_decoder_decoded_invInputs[17], _ibuf_io_inst_0_bits_inst_bits[20], id_ctrl_decoder_decoded_invInputs[19], _ibuf_io_inst_0_bits_inst_bits[22], id_ctrl_decoder_decoded_invInputs[21], id_ctrl_decoder_decoded_invInputs[22], id_ctrl_decoder_decoded_invInputs[23], id_ctrl_decoder_decoded_invInputs[24], id_ctrl_decoder_decoded_invInputs[25], _ibuf_io_inst_0_bits_inst_bits[28], id_ctrl_decoder_decoded_invInputs[28], id_ctrl_decoder_decoded_invInputs[29]}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_101, &{_ibuf_io_inst_0_bits_inst_bits[4], _ibuf_io_inst_0_bits_inst_bits[5], _ibuf_io_inst_0_bits_inst_bits[6], id_ctrl_decoder_decoded_invInputs[5], id_ctrl_decoder_decoded_invInputs[6], id_ctrl_decoder_decoded_invInputs[7], id_ctrl_decoder_decoded_invInputs[8], id_ctrl_decoder_decoded_invInputs[9], id_ctrl_decoder_decoded_invInputs[10], id_ctrl_decoder_decoded_invInputs[11], id_ctrl_decoder_decoded_invInputs[12], id_ctrl_decoder_decoded_invInputs[13], id_ctrl_decoder_decoded_invInputs[14], id_ctrl_decoder_decoded_invInputs[15], id_ctrl_decoder_decoded_invInputs[16], id_ctrl_decoder_decoded_invInputs[17], id_ctrl_decoder_decoded_invInputs[18], _ibuf_io_inst_0_bits_inst_bits[21], id_ctrl_decoder_decoded_invInputs[20], id_ctrl_decoder_decoded_invInputs[21], _ibuf_io_inst_0_bits_inst_bits[24], _ibuf_io_inst_0_bits_inst_bits[25], id_ctrl_decoder_decoded_invInputs[24], _ibuf_io_inst_0_bits_inst_bits[27], _ibuf_io_inst_0_bits_inst_bits[28], _ibuf_io_inst_0_bits_inst_bits[29], _ibuf_io_inst_0_bits_inst_bits[30], id_ctrl_decoder_decoded_invInputs[29]}}, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_55, &_id_ctrl_decoder_decoded_andMatrixOutputs_T_43}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] reg id_reg_fence; // @[RocketCore.scala:333:29] wire _id_csr_ren_T = id_ctrl_csr == 3'h6; // @[package.scala:16:47] wire id_csr_en = _id_csr_ren_T | (&id_ctrl_csr) | id_ctrl_csr == 3'h5; // @[package.scala:16:47, :81:59] wire id_mem_busy = ~io_dmem_ordered | io_dmem_req_valid_0; // @[RocketCore.scala:403:{21,38}, :1130:41] wire _io_rocc_cmd_valid_T = wb_reg_valid & wb_ctrl_rocc; // @[RocketCore.scala:245:20, :288:35, :407:53] wire bypass_sources_3_1 = mem_reg_valid & mem_ctrl_wxd; // @[RocketCore.scala:244:21, :265:36, :459:20] wire _fp_data_hazard_ex_T_1 = ex_reg_inst[11:7] == _ibuf_io_inst_0_bits_inst_rs1; // @[RocketCore.scala:259:24, :311:20, :453:29, :461:82] wire _fp_data_hazard_mem_T_1 = mem_reg_inst[11:7] == _ibuf_io_inst_0_bits_inst_rs1; // @[RocketCore.scala:278:25, :311:20, :454:31, :461:82] wire _fp_data_hazard_ex_T_3 = ex_reg_inst[11:7] == _ibuf_io_inst_0_bits_inst_rs2; // @[RocketCore.scala:259:24, :311:20, :453:29, :461:82] wire _fp_data_hazard_mem_T_3 = mem_reg_inst[11:7] == _ibuf_io_inst_0_bits_inst_rs2; // @[RocketCore.scala:278:25, :311:20, :454:31, :461:82] reg ex_reg_rs_bypass_0; // @[RocketCore.scala:465:29] reg ex_reg_rs_bypass_1; // @[RocketCore.scala:465:29] reg [1:0] 
ex_reg_rs_lsb_0; // @[RocketCore.scala:466:26] reg [1:0] ex_reg_rs_lsb_1; // @[RocketCore.scala:466:26] reg [61:0] ex_reg_rs_msb_0; // @[RocketCore.scala:467:26] reg [61:0] ex_reg_rs_msb_1; // @[RocketCore.scala:467:26] wire [3:0][63:0] _GEN = {{io_dmem_resp_bits_data_word_bypass}, {wb_reg_wdata}, {mem_reg_wdata}, {64'h0}}; // @[package.scala:39:{76,86}] wire [63:0] ex_rs_0 = ex_reg_rs_bypass_0 ? _GEN[ex_reg_rs_lsb_0] : {ex_reg_rs_msb_0, ex_reg_rs_lsb_0}; // @[package.scala:39:{76,86}] wire [63:0] ex_rs_1 = ex_reg_rs_bypass_1 ? _GEN[ex_reg_rs_lsb_1] : {ex_reg_rs_msb_1, ex_reg_rs_lsb_1}; // @[package.scala:39:{76,86}] wire _ex_imm_b0_T_4 = ex_ctrl_sel_imm == 3'h5; // @[RocketCore.scala:243:20, :1341:24] wire ex_imm_sign = ~_ex_imm_b0_T_4 & ex_reg_inst[31]; // @[RocketCore.scala:259:24, :1341:{19,24,44}] wire _ex_imm_b4_1_T = ex_ctrl_sel_imm == 3'h2; // @[RocketCore.scala:243:20, :1342:26] wire _ex_imm_b4_1_T_2 = ex_ctrl_sel_imm == 3'h1; // @[RocketCore.scala:243:20, :1346:23] wire _ex_imm_b0_T = ex_ctrl_sel_imm == 3'h0; // @[RocketCore.scala:243:20, :1349:24] wire [66:0] ex_rs1shl = {3'h0, ex_reg_inst[3] ? {32'h0, ex_rs_0[31:0]} : ex_rs_0} << ex_reg_inst[14:13]; // @[RocketCore.scala:259:24, :469:14, :471:{22,34,47,65,79}] wire [3:0][63:0] _GEN_0 = {{ex_rs1shl[63:0]}, {{{24{ex_reg_pc[39]}}, ex_reg_pc}}, {ex_rs_0}, {64'h0}}; // @[RocketCore.scala:256:22, :469:14, :471:65, :472:48] wire [3:0] _ex_op2_T_1 = ex_reg_rvc ? 4'h2 : 4'h4; // @[RocketCore.scala:249:35, :481:19] wire div_io_req_valid = ex_reg_valid & ex_ctrl_div; // @[RocketCore.scala:243:20, :248:35, :512:36] wire ex_pc_valid = ex_reg_valid | ex_reg_replay | ex_reg_xcpt_interrupt; // @[RocketCore.scala:247:35, :248:35, :255:26, :595:{34,51}] wire wb_dcache_miss = wb_ctrl_mem & ~io_dmem_resp_valid; // @[RocketCore.scala:245:20, :596:{36,39}] wire replay_ex = ex_reg_replay | ex_reg_valid & (ex_ctrl_mem & ~io_dmem_req_ready | ex_ctrl_div & ~_div_io_req_ready | wb_dcache_miss & ex_reg_load_use); // @[RocketCore.scala:243:20, :248:35, :253:35, :255:26, :511:19, :596:36, :597:{42,45,64}, :598:{42,45}, :600:43, :601:{33,50,75}] wire ctrl_killx = ibuf_io_kill | replay_ex | ~ex_reg_valid; // @[RocketCore.scala:248:35, :307:35, :601:33, :602:{35,48,51}] wire _mem_cfi_taken_T = mem_ctrl_branch & mem_br_taken; // @[RocketCore.scala:244:21, :284:25, :616:25] wire [3:0] _mem_br_target_T_6 = mem_reg_rvc ? 4'h2 : 4'h4; // @[RocketCore.scala:266:36, :618:8] wire [31:0] _mem_br_target_T_8 = _mem_cfi_taken_T ? {{20{mem_reg_inst[31]}}, mem_reg_inst[7], mem_reg_inst[30:25], mem_reg_inst[11:8], 1'h0} : mem_ctrl_jal ? {{12{mem_reg_inst[31]}}, mem_reg_inst[19:12], mem_reg_inst[20], mem_reg_inst[30:21], 1'h0} : {{28{_mem_br_target_T_6[3]}}, _mem_br_target_T_6}; // @[RocketCore.scala:244:21, :278:25, :616:{8,25}, :617:8, :618:8, :1341:44, :1343:65, :1345:39, :1346:39, :1347:62, :1349:57, :1355:8] wire [39:0] _mem_br_target_T_9 = mem_reg_pc + {{8{_mem_br_target_T_8[31]}}, _mem_br_target_T_8}; // @[RocketCore.scala:277:23, :615:41, :616:8] wire [39:0] _mem_npc_T_4 = mem_ctrl_jalr | mem_reg_sfence ? {mem_reg_wdata[63:39] == 25'h0 | (&(mem_reg_wdata[63:39])) ? mem_reg_wdata[39] : ~(mem_reg_wdata[38]), mem_reg_wdata[38:0]} : _mem_br_target_T_9; // @[RocketCore.scala:244:21, :276:27, :282:26, :615:41, :619:{21,36}, :1293:{17,23}, :1294:{18,21,29,34,46,51,54}, :1295:{8,16}] wire [39:0] mem_npc = _mem_npc_T_4 & 40'hFFFFFFFFFE; // @[RocketCore.scala:619:{21,129}] wire mem_wrong_npc = ex_pc_valid ? 
mem_npc != ex_reg_pc : ~(_ibuf_io_inst_0_valid | io_imem_resp_valid) | mem_npc != _ibuf_io_pc; // @[RocketCore.scala:256:22, :311:20, :595:{34,51}, :619:129, :621:{8,30}, :622:{8,31,62}] wire mem_cfi = mem_ctrl_branch | mem_ctrl_jalr | mem_ctrl_jal; // @[RocketCore.scala:244:21, :625:{33,50}] assign take_pc_mem = mem_reg_valid & ~mem_reg_xcpt & (mem_wrong_npc | mem_reg_sfence); // @[RocketCore.scala:265:36, :268:36, :276:27, :621:8, :624:27, :629:{32,49,71}] wire mem_debug_breakpoint = mem_reg_load & _bpu_io_debug_ld | mem_reg_store & _bpu_io_debug_st; // @[RocketCore.scala:273:36, :274:36, :414:19, :678:{44,64,82}] wire mem_ldst_xcpt = mem_debug_breakpoint | mem_reg_load & _bpu_io_xcpt_ld | mem_reg_store & _bpu_io_xcpt_st; // @[RocketCore.scala:273:36, :274:36, :414:19, :677:{38,57,75}, :678:64, :1278:35] wire dcache_kill_mem = bypass_sources_3_1 & io_dmem_replay_next; // @[RocketCore.scala:459:20, :695:55] wire fpu_kill_mem = mem_reg_valid & mem_ctrl_fp & io_fpu_nack_mem; // @[RocketCore.scala:244:21, :265:36, :696:{36,51}] wire killm_common = dcache_kill_mem | take_pc_wb | mem_reg_xcpt | ~mem_reg_valid; // @[RocketCore.scala:265:36, :268:36, :695:55, :700:{38,52,68,71}, :762:{27,38,53}] reg div_io_kill_REG; // @[RocketCore.scala:701:41] wire _GEN_1 = wb_reg_valid & wb_ctrl_mem; // @[RocketCore.scala:245:20, :288:35, :730:19] wire _GEN_2 = _GEN_1 & io_dmem_s2_xcpt_pf_st; // @[RocketCore.scala:730:{19,34}] wire _GEN_3 = _GEN_1 & io_dmem_s2_xcpt_pf_ld; // @[RocketCore.scala:730:19, :731:34] wire _GEN_4 = _GEN_1 & io_dmem_s2_xcpt_ae_st; // @[RocketCore.scala:730:19, :734:34] wire _GEN_5 = _GEN_1 & io_dmem_s2_xcpt_ae_ld; // @[RocketCore.scala:730:19, :735:34] wire _GEN_6 = _GEN_1 & io_dmem_s2_xcpt_ma_st; // @[RocketCore.scala:730:19, :736:34] wire wb_xcpt = wb_reg_xcpt | _GEN_2 | _GEN_3 | _GEN_4 | _GEN_5 | _GEN_6 | _GEN_1 & io_dmem_s2_xcpt_ma_ld; // @[RocketCore.scala:289:35, :730:{19,34}, :731:34, :734:34, :735:34, :736:34, :737:34, :1278:35] wire wb_wxd = wb_reg_valid & wb_ctrl_wxd; // @[RocketCore.scala:245:20, :288:35, :755:29] wire wb_set_sboard = wb_ctrl_div | wb_dcache_miss | wb_ctrl_rocc | wb_ctrl_vec; // @[RocketCore.scala:245:20, :596:36, :756:{35,53,69}] wire replay_wb_common = io_dmem_s2_nack | wb_reg_replay; // @[RocketCore.scala:290:35, :757:42] wire _replay_wb_T = replay_wb_common | _io_rocc_cmd_valid_T; // @[RocketCore.scala:407:53, :757:42, :761:36] assign take_pc_wb = _replay_wb_T | wb_xcpt | _csr_io_eret | wb_reg_flush_pipe; // @[RocketCore.scala:291:35, :341:19, :761:36, :762:{27,38,53}, :1278:35] wire dmem_resp_valid = io_dmem_resp_valid & io_dmem_resp_bits_has_data; // @[RocketCore.scala:768:44] wire dmem_resp_replay = dmem_resp_valid & io_dmem_resp_bits_replay; // @[RocketCore.scala:768:44, :769:42] wire _GEN_7 = dmem_resp_replay & ~(io_dmem_resp_bits_tag[0]); // @[RocketCore.scala:765:{23,45}, :769:42, :809:26] assign ll_arb_io_out_ready = ~_GEN_7 & ~wb_wxd; // @[RocketCore.scala:755:29, :782:{23,26}, :809:{26,44}, :810:25] wire [4:0] ll_waddr = _GEN_7 ? 
io_dmem_resp_bits_tag[5:1] : _ll_arb_io_out_bits_tag; // @[RocketCore.scala:767:46, :776:22, :780:26, :809:{26,44}, :811:14] wire ll_wen = _GEN_7 | ll_arb_io_out_ready & _ll_arb_io_out_valid; // @[Decoupled.scala:51:35] wire wb_valid = wb_reg_valid & ~_replay_wb_T & ~wb_xcpt; // @[RocketCore.scala:288:35, :761:36, :815:{31,34,45,48}, :1278:35] wire wb_wen = wb_valid & wb_ctrl_wxd; // @[RocketCore.scala:245:20, :815:{31,45}, :816:25] wire rf_wen = wb_wen | ll_wen; // @[RocketCore.scala:781:24, :809:44, :812:12, :816:25, :817:23] wire [4:0] rf_waddr = ll_wen ? ll_waddr : wb_reg_inst[11:7]; // @[RocketCore.scala:300:24, :455:29, :780:26, :781:24, :809:44, :811:14, :812:12, :818:21] wire [63:0] rf_wdata = dmem_resp_valid & ~(io_dmem_resp_bits_tag[0]) ? io_dmem_resp_bits_data : ll_wen ? _ll_arb_io_out_bits_data : (|wb_ctrl_csr) ? _csr_io_rw_rdata : wb_reg_wdata; // @[RocketCore.scala:245:20, :302:25, :341:19, :765:{23,45}, :768:44, :776:22, :781:24, :809:44, :812:12, :819:{21,38}, :820:21, :821:{21,34}] wire [63:0] id_rs_0 = rf_wen & (|rf_waddr) & rf_waddr == _ibuf_io_inst_0_bits_inst_rs1 ? rf_wdata : _rf_ext_R1_data; // @[RocketCore.scala:311:20, :817:23, :818:21, :819:21, :824:17, :1319:15, :1326:19, :1331:{16,25}, :1334:{20,31,39}] wire [63:0] id_rs_1 = rf_wen & (|rf_waddr) & rf_waddr == _ibuf_io_inst_0_bits_inst_rs2 ? rf_wdata : _rf_ext_R0_data; // @[RocketCore.scala:311:20, :817:23, :818:21, :819:21, :824:17, :1319:15, :1326:19, :1331:{16,25}, :1334:{20,31,39}] wire _htval_valid_imem_T = wb_reg_cause == 64'h14; // @[package.scala:16:47] wire tval_any_addr = ~wb_reg_xcpt | wb_reg_cause == 64'h3 | wb_reg_cause == 64'h1 | wb_reg_cause == 64'hC | _htval_valid_imem_T; // @[package.scala:16:47, :81:59] wire hazard_targets_0_1 = (|_id_ctrl_decoder_decoded_orMatrixOutputs_T_59) & (|_ibuf_io_inst_0_bits_inst_rs1); // @[pla.scala:114:{19,36}] wire hazard_targets_1_1 = (|_id_ctrl_decoder_decoded_orMatrixOutputs_T_61) & (|_ibuf_io_inst_0_bits_inst_rs2); // @[pla.scala:114:{19,36}] wire hazard_targets_2_1 = (|_id_ctrl_decoder_decoded_orMatrixOutputs_T_10) & (|_ibuf_io_inst_0_bits_inst_rd); // @[pla.scala:114:{19,36}] reg [31:0] _r; // @[RocketCore.scala:1305:29] wire [31:0] r = {_r[31:1], 1'h0}; // @[RocketCore.scala:1305:29, :1306:{35,40}] wire [31:0] _GEN_8 = {27'h0, _ibuf_io_inst_0_bits_inst_rs1}; // @[RocketCore.scala:311:20, :1302:35, :1309:58] wire [31:0] _id_sboard_hazard_T = r >> _GEN_8; // @[RocketCore.scala:1302:35, :1306:40] wire [31:0] _GEN_9 = {27'h0, _ibuf_io_inst_0_bits_inst_rs2}; // @[RocketCore.scala:311:20, :1302:35, :1309:58] wire [31:0] _id_sboard_hazard_T_7 = r >> _GEN_9; // @[RocketCore.scala:1302:35, :1306:40] wire [31:0] _GEN_10 = {27'h0, _ibuf_io_inst_0_bits_inst_rd}; // @[RocketCore.scala:311:20, :1302:35, :1309:58] wire [31:0] _id_sboard_hazard_T_14 = r >> _GEN_10; // @[RocketCore.scala:1302:35, :1306:40] wire _fp_data_hazard_ex_T_7 = _ibuf_io_inst_0_bits_inst_rd == ex_reg_inst[11:7]; // @[RocketCore.scala:259:24, :311:20, :453:29, :989:70] wire _fp_data_hazard_mem_T_7 = _ibuf_io_inst_0_bits_inst_rd == mem_reg_inst[11:7]; // @[RocketCore.scala:278:25, :311:20, :454:31, :998:72] wire data_hazard_mem = mem_ctrl_wxd & (hazard_targets_0_1 & _fp_data_hazard_mem_T_1 | hazard_targets_1_1 & _fp_data_hazard_mem_T_3 | hazard_targets_2_1 & _fp_data_hazard_mem_T_7); // @[RocketCore.scala:244:21, :461:82, :969:42, :970:42, :971:42, :998:{38,72}, :1287:{27,50}] wire _fp_data_hazard_wb_T_1 = _ibuf_io_inst_0_bits_inst_rs1 == wb_reg_inst[11:7]; // @[RocketCore.scala:300:24, :311:20, :455:29, 
:1008:70] wire _fp_data_hazard_wb_T_3 = _ibuf_io_inst_0_bits_inst_rs2 == wb_reg_inst[11:7]; // @[RocketCore.scala:300:24, :311:20, :455:29, :1008:70] wire _fp_data_hazard_wb_T_7 = _ibuf_io_inst_0_bits_inst_rd == wb_reg_inst[11:7]; // @[RocketCore.scala:300:24, :311:20, :455:29, :1008:70] reg [31:0] _id_stall_fpu_r; // @[RocketCore.scala:1305:29] wire [31:0] _id_stall_fpu_T_20 = _id_stall_fpu_r >> _GEN_8; // @[RocketCore.scala:1302:35, :1305:29] wire [31:0] _id_stall_fpu_T_23 = _id_stall_fpu_r >> _GEN_9; // @[RocketCore.scala:1302:35, :1305:29] wire [31:0] _id_stall_fpu_T_26 = _id_stall_fpu_r >> _ibuf_io_inst_0_bits_inst_rs3; // @[RocketCore.scala:311:20, :1302:35, :1305:29] wire [31:0] _id_stall_fpu_T_29 = _id_stall_fpu_r >> _GEN_10; // @[RocketCore.scala:1302:35, :1305:29] reg dcache_blocked_blocked; // @[RocketCore.scala:1024:22] reg rocc_blocked; // @[RocketCore.scala:1028:25] wire _ctrl_stalld_T_32 = ex_reg_valid & (ex_ctrl_wxd & (hazard_targets_0_1 & _fp_data_hazard_ex_T_1 | hazard_targets_1_1 & _fp_data_hazard_ex_T_3 | hazard_targets_2_1 & _fp_data_hazard_ex_T_7) & ((|ex_ctrl_csr) | ex_ctrl_jalr | ex_ctrl_mem | ex_ctrl_mul | ex_ctrl_div | ex_ctrl_fp | ex_ctrl_rocc) | (|_id_ctrl_decoder_decoded_orMatrixOutputs_T_67) & ex_ctrl_wfd & (io_fpu_dec_ren1 & _fp_data_hazard_ex_T_1 | io_fpu_dec_ren2 & _fp_data_hazard_ex_T_3 | io_fpu_dec_ren3 & _ibuf_io_inst_0_bits_inst_rs3 == ex_reg_inst[11:7] | io_fpu_dec_wen & _fp_data_hazard_ex_T_7)) | mem_reg_valid & (data_hazard_mem & ((|mem_ctrl_csr) | mem_ctrl_mem & mem_mem_cmd_bh | mem_ctrl_mul | mem_ctrl_div | mem_ctrl_fp | mem_ctrl_rocc | mem_ctrl_vec) | (|_id_ctrl_decoder_decoded_orMatrixOutputs_T_67) & mem_ctrl_wfd & (io_fpu_dec_ren1 & _fp_data_hazard_mem_T_1 | io_fpu_dec_ren2 & _fp_data_hazard_mem_T_3 | io_fpu_dec_ren3 & _ibuf_io_inst_0_bits_inst_rs3 == mem_reg_inst[11:7] | io_fpu_dec_wen & _fp_data_hazard_mem_T_7)) | wb_reg_valid & (wb_ctrl_wxd & (hazard_targets_0_1 & _fp_data_hazard_wb_T_1 | hazard_targets_1_1 & _fp_data_hazard_wb_T_3 | hazard_targets_2_1 & _fp_data_hazard_wb_T_7) & wb_set_sboard | (|_id_ctrl_decoder_decoded_orMatrixOutputs_T_67) & wb_ctrl_wfd & (io_fpu_dec_ren1 & _fp_data_hazard_wb_T_1 | io_fpu_dec_ren2 & _fp_data_hazard_wb_T_3 | io_fpu_dec_ren3 & _ibuf_io_inst_0_bits_inst_rs3 == wb_reg_inst[11:7] | io_fpu_dec_wen & _fp_data_hazard_wb_T_7)) | hazard_targets_0_1 & _id_sboard_hazard_T[0] & ~(ll_wen & ll_waddr == _ibuf_io_inst_0_bits_inst_rs1) | hazard_targets_1_1 & _id_sboard_hazard_T_7[0] & ~(ll_wen & ll_waddr == _ibuf_io_inst_0_bits_inst_rs2) | hazard_targets_2_1 & _id_sboard_hazard_T_14[0] & ~(ll_wen & ll_waddr == _ibuf_io_inst_0_bits_inst_rd) | _csr_io_singleStep & (ex_reg_valid | mem_reg_valid | wb_reg_valid) | id_csr_en & _csr_io_decode_0_fp_csr & ~io_fpu_fcsr_rdy | (|_id_ctrl_decoder_decoded_orMatrixOutputs_T_67) & (io_fpu_dec_ren1 & _id_stall_fpu_T_20[0] | io_fpu_dec_ren2 & _id_stall_fpu_T_23[0] | io_fpu_dec_ren3 & _id_stall_fpu_T_26[0] | io_fpu_dec_wen & _id_stall_fpu_T_29[0]) | (|_id_ctrl_decoder_decoded_orMatrixOutputs_T_29) & dcache_blocked_blocked & ~io_dmem_perf_grant | (|_id_ctrl_decoder_decoded_orMatrixOutputs_T_12) & (~(_div_io_req_ready | _div_io_resp_valid & ~wb_wxd) | div_io_req_valid) | id_mem_busy & ((|_id_ctrl_decoder_decoded_orMatrixOutputs_T_2) & _ibuf_io_inst_0_bits_inst_bits[25] | (&_id_ctrl_decoder_decoded_andMatrixOutputs_T_33) | id_reg_fence & (|_id_ctrl_decoder_decoded_orMatrixOutputs_T_29)) | _csr_io_csr_stall | id_reg_pause; // @[pla.scala:98:{53,70}, :114:{19,36}] wire ctrl_killd = 
~_ibuf_io_inst_0_valid | _ibuf_io_inst_0_bits_replay | ibuf_io_kill | _ctrl_stalld_T_32 | _csr_io_interrupt; // @[RocketCore.scala:307:35, :311:20, :341:19, :1032:{18,35,51}, :1033:23, :1034:74, :1036:61, :1037:32, :1039:34, :1041:15, :1042:17, :1043:22, :1046:{17,40,71,89,104}, :1287:50] reg io_imem_progress_REG; // @[RocketCore.scala:1059:30] wire io_ptw_sfence_valid_0 = wb_reg_valid & wb_reg_sfence; // @[RocketCore.scala:288:35, :294:26, :1060:40] wire _io_imem_btb_update_bits_cfiType_T_9 = mem_ctrl_jal | mem_ctrl_jalr; // @[RocketCore.scala:244:21, :1074:23] wire [38:0] _io_imem_btb_update_bits_br_pc_T_1 = mem_reg_pc[38:0] + {37'h0, ~mem_reg_rvc, 1'h0}; // @[RocketCore.scala:266:36, :277:23, :1079:{69,74}] wire [38:0] io_imem_bht_update_bits_pc_0 = {_io_imem_btb_update_bits_br_pc_T_1[38:2], 2'h0}; // @[RocketCore.scala:1079:69, :1080:66] assign io_dmem_req_valid_0 = ex_reg_valid & ex_ctrl_mem; // @[RocketCore.scala:243:20, :248:35, :1130:41] reg [63:0] coreMonitorBundle_rd0val_REG; // @[RocketCore.scala:1199:46] reg [63:0] coreMonitorBundle_rd0val_REG_1; // @[RocketCore.scala:1199:38] reg [63:0] coreMonitorBundle_rd1val_REG; // @[RocketCore.scala:1201:46] reg [63:0] coreMonitorBundle_rd1val_REG_1; // @[RocketCore.scala:1201:38]
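
The decoder wires above (id_ctrl_decoder_decoded_invInputs, the _andMatrixOutputs_T_* terms, and the _orMatrixOutputs_T_* reductions) follow the two-level PLA structure emitted by rocket-chip's pla.scala: each AND-plane row is the conjunction of selected instruction bits and inverted bits, and each OR-plane output reduces a subset of those rows. The sketch below is an illustrative plain-Scala model of that evaluation, not the pla.scala generator itself; the object and method names are assumptions for illustration only.

// Plain-Scala model of two-level PLA evaluation, mirroring the structure of the
// generated andMatrixOutputs / orMatrixOutputs wires. Illustrative only.
object PlaDecodeModel {
  // One AND-plane row: Some(true) requires the input bit to be 1,
  // Some(false) requires it to be 0 (i.e. uses the inverted input), None is don't-care.
  def andRow(inputs: IndexedSeq[Boolean], term: IndexedSeq[Option[Boolean]]): Boolean =
    inputs.zip(term).forall { case (bit, care) => care.forall(_ == bit) }

  // One OR-plane output: the disjunction of the AND-plane rows it selects.
  def orOutput(rowValues: IndexedSeq[Boolean], selected: Seq[Int]): Boolean =
    selected.exists(rowValues(_))
}
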
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
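
One detail of the Arithmetic.scala above that is easy to misread is the rounding right-shift implemented by SIntArithmetic's >> (and used by the PE's (c >> shift_offset) output path). A minimal pure-Scala reference model of that formula is sketched below, with a few illustrative values; it is not part of the gemmini sources and assumes the shifted value stays within its declared width.

// Reference model of the rounding shift in SIntArithmetic.>>:
// r = point_five & (zeros | ones_digit); the shifted result is incremented by r.
object RoundingShiftModel {
  def roundingShift(x: BigInt, u: Int): BigInt = {
    if (u == 0) x
    else {
      val pointFive = x.testBit(u - 1)                         // bit just below the cut
      val zeros     = (x & ((BigInt(1) << (u - 1)) - 1)) != 0  // any lower bits set
      val onesDigit = x.testBit(u)                             // bit that becomes the result LSB
      val r         = pointFive && (zeros || onesDigit)
      (x >> u) + (if (r) 1 else 0)
    }
  }
}
// For example: roundingShift(7, 2) == 2, roundingShift(6, 2) == 2,
// roundingShift(5, 2) == 1, and roundingShift(2, 2) == 0.
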
module MacUnit_46( // @[PE.scala:14:7]
  input clock, // @[PE.scala:14:7]
  input reset, // @[PE.scala:14:7]
  input [7:0] io_in_a, // @[PE.scala:16:14]
  input [7:0] io_in_b, // @[PE.scala:16:14]
  input [31:0] io_in_c, // @[PE.scala:16:14]
  output [19:0] io_out_d // @[PE.scala:16:14]
);

  wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
  wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
  wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
  wire [19:0] io_out_d_0; // @[PE.scala:14:7]
  wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
  wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7]
  wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54]
  wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
  assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12]
  assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule
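
For reference, the generated MacUnit_46 is the SIntArithmetic mac (m1 * m2 + self) elaborated with 8-bit operands, a 32-bit accumulator input, and a 20-bit output: the operands are sign-extended, multiplied and accumulated, and the sum is truncated to the output width. The following is a bit-accurate plain-Scala model of that datapath; the model object is an illustrative assumption, not part of gemmini.

// Bit-accurate model of what MacUnit_46 computes: signed 8x8 multiply,
// signed add with the 32-bit accumulator input, truncation to 20 bits.
object MacUnit46Model {
  private def truncToSigned(x: BigInt, width: Int): BigInt = {
    val u = x & ((BigInt(1) << width) - 1)
    if (u.testBit(width - 1)) u - (BigInt(1) << width) else u
  }
  def mac(a: Int, b: Int, c: Int): BigInt = {
    require(-128 <= a && a <= 127 && -128 <= b && b <= 127) // io_in_a / io_in_b are 8-bit signed
    truncToSigned(BigInt(a) * BigInt(b) + BigInt(c), 20)    // io_out_d keeps the low 20 bits, signed
  }
}
// Example: MacUnit46Model.mac(100, -50, 3) == -4997.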